diff --git a/.gitignore b/.gitignore index c12dda081bc..74860f2531b 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,4 @@ website/vendor # Keep windows files with windows line endings *.winfile eol=crlf +dist diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 00000000000..0a01108d5ae --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,25 @@ +# Build customization +builds: + - binary: terraform-provider-aws + goos: + - windows + - darwin + - linux + goarch: + - amd64 + - 386 + ignore: + - goos: darwin + goarch: 386 + +# Archive customization +archive: + format: zip + + replacements: + amd64: 64-bits + 386: 32-bits + darwin: macOS + + files: + - nothing.* diff --git a/.travis.yml b/.travis.yml index 1ba1993ab97..99366c8e31c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,25 +2,46 @@ dist: trusty sudo: false language: go go: -- 1.8.1 +- 1.9.1 + +before_install: +- if [ "${TRAVIS_TAG::1}" = "v" ]; + then + cd ../.. && + mv coveo terraform-providers && + cd terraform-providers/terraform-provider-aws; + fi install: # This script is used by the Travis build to install a cookie for # go.googlesource.com so rate limits are higher when using `go get` to fetch # packages that live there. # See: https://github.com/golang/go/issues/12933 -- bash scripts/gogetcookie.sh -- go get github.com/kardianos/govendor +- if [ "${TRAVIS_TAG::1}" = "v" ]; + then + bash scripts/gogetcookie.sh && + go get github.com/kardianos/govendor; + fi script: -- make test -- make vendor-status -- make vet +- if [ "${TRAVIS_TAG::1}" = "v" ]; + then + make test && + make vendor-status && + make vet; + fi -branches: - only: - - master +# branches: +# only: +# - master + matrix: fast_finish: true allow_failures: - go: tip + +after_success: +- if [ "${TRAVIS_TAG::1}" = "v" ]; + then + curl -sL https://git.io/goreleaser | bash; + fi diff --git a/CHANGELOG.md b/CHANGELOG.md index 54cb57904c4..057ee42d997 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,60 @@ -## 1.0.1 (Unreleased) +## 1.2.0 (October 31, 2017) + +INTERNAL: + +* Remove `id` fields from schema definitions ([#1626](https://github.com/terraform-providers/terraform-provider-aws/issues/1626)) + +FEATURES: + +* **New Resource:** `aws_servicecatalog_portfolio` ([#1694](https://github.com/terraform-providers/terraform-provider-aws/issues/1694)) +* **New Resource:** `aws_ses_domain_dkim` ([#1786](https://github.com/terraform-providers/terraform-provider-aws/issues/1786)) +* **New Resource:** `aws_cognito_identity_pool_roles_attachment` ([#863](https://github.com/terraform-providers/terraform-provider-aws/issues/863)) +* **New Resource:** `aws_ecr_lifecycle_policy` ([#2096](https://github.com/terraform-providers/terraform-provider-aws/issues/2096)) +* **New Data Source:** `aws_nat_gateway` ([#1294](https://github.com/terraform-providers/terraform-provider-aws/issues/1294)) +* **New Data Source:** `aws_dynamodb_table` ([#2062](https://github.com/terraform-providers/terraform-provider-aws/issues/2062)) +* **New Data Source:** `aws_cloudtrail_service_account` ([#1774](https://github.com/terraform-providers/terraform-provider-aws/issues/1774)) + +IMPROVEMENTS: + +* resource/aws_ami: Support configurable timeouts ([#1811](https://github.com/terraform-providers/terraform-provider-aws/issues/1811)) +* resource/ami_copy: Support configurable timeouts ([#1811](https://github.com/terraform-providers/terraform-provider-aws/issues/1811)) +* resource/ami_from_instance: Support configurable timeouts
([#1811](https://github.com/terraform-providers/terraform-provider-aws/issues/1811)) +* data-source/aws_security_group: add description ([#1943](https://github.com/terraform-providers/terraform-provider-aws/issues/1943)) +* resource/aws_cloudfront_distribution: Change the default minimum_protocol_version to TLSv1 ([#1856](https://github.com/terraform-providers/terraform-provider-aws/issues/1856)) +* resource/aws_sns_topic: Support SMS in protocols ([#1813](https://github.com/terraform-providers/terraform-provider-aws/issues/1813)) +* resource/aws_spot_fleet_request: Add support for `tags` ([#2042](https://github.com/terraform-providers/terraform-provider-aws/issues/2042)) +* resource/aws_kinesis_firehose_delivery_stream: Add `s3_backup_mode` option ([#1830](https://github.com/terraform-providers/terraform-provider-aws/issues/1830)) +* resource/aws_elasticsearch_domain: Support VPC configuration ([#1958](https://github.com/terraform-providers/terraform-provider-aws/issues/1958)) +* resource/aws_alb_target_group: Add support for `target_type` ([#1589](https://github.com/terraform-providers/terraform-provider-aws/issues/1589)) +* resource/aws_sqs_queue: Add support for `tags` ([#1987](https://github.com/terraform-providers/terraform-provider-aws/issues/1987)) +* resource/aws_security_group: Add `revoke_rules_on_delete` option to force a security group to revoke + rules before deleting the group ([#2074](https://github.com/terraform-providers/terraform-provider-aws/issues/2074)) +* resource/aws_cloudwatch_log_metric_filter: Add support for DefaultValue ([#1578](https://github.com/terraform-providers/terraform-provider-aws/issues/1578)) +* resource/aws_emr_cluster: Expose error on `TERMINATED_WITH_ERRORS` ([#2081](https://github.com/terraform-providers/terraform-provider-aws/issues/2081)) + +BUG FIXES: + +* resource/aws_elasticache_parameter_group: Add missing return to retry logic ([#1891](https://github.com/terraform-providers/terraform-provider-aws/issues/1891)) +* resource/aws_batch_job_queue: Wait for update completion when disabling ([#1892](https://github.com/terraform-providers/terraform-provider-aws/issues/1892)) +* resource/aws_snapshot_create_volume_permission: Raise creation timeout to 10mins ([#1894](https://github.com/terraform-providers/terraform-provider-aws/issues/1894)) +* resource/aws_snapshot_create_volume_permission: Raise creation timeout to 20mins ([#2049](https://github.com/terraform-providers/terraform-provider-aws/issues/2049)) +* resource/aws_kms_alias: Retry creation on `NotFoundException` ([#1896](https://github.com/terraform-providers/terraform-provider-aws/issues/1896)) +* resource/aws_kms_key: Retry reading tags on `NotFoundException` ([#1900](https://github.com/terraform-providers/terraform-provider-aws/issues/1900)) +* resource/aws_db_snapshot: Raise creation timeout to 20mins ([#1905](https://github.com/terraform-providers/terraform-provider-aws/issues/1905)) +* resource/aws_lb: Allow assigning EIP to network LB ([#1956](https://github.com/terraform-providers/terraform-provider-aws/issues/1956)) +* resource/aws_s3_bucket: Retry tagging on OperationAborted ([#2008](https://github.com/terraform-providers/terraform-provider-aws/issues/2008)) +* resource/aws_cognito_identity_pool: Fixed refresh of providers ([#2015](https://github.com/terraform-providers/terraform-provider-aws/issues/2015)) +* resource/aws_elasticache_replication_group: Raise creation timeout to 50mins ([#2048](https://github.com/terraform-providers/terraform-provider-aws/issues/2048)) +*
resource/aws_api_gateway_usage_plan: Fixed setting of rate_limit ([#2076](https://github.com/terraform-providers/terraform-provider-aws/issues/2076)) +* resource/aws_elastic_beanstalk_application: Expose error leading to failed deletion ([#2080](https://github.com/terraform-providers/terraform-provider-aws/issues/2080)) +* resource/aws_s3_bucket: Accept query strings in redirect hosts ([#2059](https://github.com/terraform-providers/terraform-provider-aws/issues/2059)) + +## 1.1.0 (October 16, 2017) NOTES: * resource/aws_alb_* & data-source/aws_alb_*: In order to support network LBs, ALBs were renamed to `aws_lb_*` due to the way APIs "new" (non-Classic) load balancers are structured in AWS. All existing ALB functionality remains untouched and new resources work the same way. `aws_alb_*` resources are still in place as "aliases", but documentation will only mention `aws_lb_*`. -`aws_alb_*` aliases will be removed in future major version. [GH-1806] +`aws_alb_*` aliases will be removed in future major version. ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) * Deprecated: * data-source/aws_alb * data-source/aws_alb_listener @@ -16,58 +67,67 @@ NOTES: FEATURES: -* **New Resource:** `aws_batch_job_definition` [GH-1710] -* **New Resource:** `aws_batch_job_queue` [GH-1710] -* **New Resource:** `aws_lb` [GH-1806] -* **New Resource:** `aws_lb_listener` [GH-1806] -* **New Resource:** `aws_lb_listener_rule` [GH-1806] -* **New Resource:** `aws_lb_target_group` [GH-1806] -* **New Resource:** `aws_lb_target_group_attachment` [GH-1806] -* **New Data Source:** `aws_lb` [GH-1806] -* **New Data Source:** `aws_lb_listener` [GH-1806] -* **New Data Source:** `aws_lb_target_group` [GH-1806] -* **New Data Source:** `aws_iam_user` [GH-1805] -* **New Data Source:** `aws_s3_bucket` [GH-1505] +* **New Resource:** `aws_batch_job_definition` ([#1710](https://github.com/terraform-providers/terraform-provider-aws/issues/1710)) +* **New Resource:** `aws_batch_job_queue` ([#1710](https://github.com/terraform-providers/terraform-provider-aws/issues/1710)) +* **New Resource:** `aws_lb` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Resource:** `aws_lb_listener` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Resource:** `aws_lb_listener_rule` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Resource:** `aws_lb_target_group` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Resource:** `aws_lb_target_group_attachment` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Data Source:** `aws_lb` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Data Source:** `aws_lb_listener` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Data Source:** `aws_lb_target_group` ([#1806](https://github.com/terraform-providers/terraform-provider-aws/issues/1806)) +* **New Data Source:** `aws_iam_user` ([#1805](https://github.com/terraform-providers/terraform-provider-aws/issues/1805)) +* **New Data Source:** `aws_s3_bucket` ([#1505](https://github.com/terraform-providers/terraform-provider-aws/issues/1505)) IMPROVEMENTS: -* data-source/aws_redshift_service_account: Add `arn` attribute [GH-1775] -* data-source/aws_vpc_endpoint: Expose `prefix_list_id` [GH-1733] -* resource/aws_kinesis_stream: Add support
for encryption [GH-1139] -* resource/aws_cloudwatch_log_group: Add support for encryption via `kms_key_id` [GH-1751] -* resource/aws_spot_instance_request: Add support for `instance_interruption_behaviour` [GH-1735] -* resource/aws_ses_event_destination: Add support for `open` & `click` event types [GH-1773] -* resource/aws_efs_file_system: Expose `dns_name` [GH-1825] -* resource/aws_security_group+aws_security_group_rule: Add support for rule description [GH-1587] -* resource/aws_emr_cluster: enable configuration of ebs root volume size [GH-1375] -* resource/aws_aws_ami: Add `root_snapshot_id` attribute [GH-1572] -* resource/aws_vpn_connection: Mark preshared keys as sensitive [GH-1850] -* resource/aws_codedeploy_deployment_group: Support blue/green and in-place deployments with traffic control [GH-1162] -* resource/aws_elb: Update ELB idle timeout to 4000s [GH-1861] -* resource/aws_spot_fleet_request: Add support for instance_interruption_behaviour [GH-1847] -* resource/aws_kinesis_firehose_delivery_stream: Specify kinesis stream as the source of a aws_kinesis_firehose_delivery_stream [GH-1605] +* data-source/aws_redshift_service_account: Add `arn` attribute ([#1775](https://github.com/terraform-providers/terraform-provider-aws/issues/1775)) +* data-source/aws_vpc_endpoint: Expose `prefix_list_id` ([#1733](https://github.com/terraform-providers/terraform-provider-aws/issues/1733)) +* resource/aws_kinesis_stream: Add support for encryption ([#1139](https://github.com/terraform-providers/terraform-provider-aws/issues/1139)) +* resource/aws_cloudwatch_log_group: Add support for encryption via `kms_key_id` ([#1751](https://github.com/terraform-providers/terraform-provider-aws/issues/1751)) +* resource/aws_spot_instance_request: Add support for `instance_interruption_behaviour` ([#1735](https://github.com/terraform-providers/terraform-provider-aws/issues/1735)) +* resource/aws_ses_event_destination: Add support for `open` & `click` event types ([#1773](https://github.com/terraform-providers/terraform-provider-aws/issues/1773)) +* resource/aws_efs_file_system: Expose `dns_name` ([#1825](https://github.com/terraform-providers/terraform-provider-aws/issues/1825)) +* resource/aws_security_group+aws_security_group_rule: Add support for rule description ([#1587](https://github.com/terraform-providers/terraform-provider-aws/issues/1587)) +* resource/aws_emr_cluster: enable configuration of ebs root volume size ([#1375](https://github.com/terraform-providers/terraform-provider-aws/issues/1375)) +* resource/aws_ami: Add `root_snapshot_id` attribute ([#1572](https://github.com/terraform-providers/terraform-provider-aws/issues/1572)) +* resource/aws_vpn_connection: Mark preshared keys as sensitive ([#1850](https://github.com/terraform-providers/terraform-provider-aws/issues/1850)) +* resource/aws_codedeploy_deployment_group: Support blue/green and in-place deployments with traffic control ([#1162](https://github.com/terraform-providers/terraform-provider-aws/issues/1162)) +* resource/aws_elb: Update ELB idle timeout to 4000s ([#1861](https://github.com/terraform-providers/terraform-provider-aws/issues/1861)) +* resource/aws_spot_fleet_request: Add support for instance_interruption_behaviour ([#1847](https://github.com/terraform-providers/terraform-provider-aws/issues/1847)) +* resource/aws_kinesis_firehose_delivery_stream: Specify kinesis stream as the source of a aws_kinesis_firehose_delivery_stream ([#1605](https://github.com/terraform-providers/terraform-provider-aws/issues/1605)) +* 
resource/aws_kinesis_firehose_delivery_stream: Output complete error when creation fails ([#1881](https://github.com/terraform-providers/terraform-provider-aws/issues/1881)) BUG FIXES: -* data-source/aws_db_instance: Make `db_instance_arn` expose ARN instead of identifier (use `db_cluster_identifier` for identifier) [GH-1766] -* data-source/aws_db_snapshot: Expose `storage_type` (was not exposed) [GH-1833] -* resource/aws_cloudtrail: Raise update retry timeout [GH-1820] -* resource/aws_elasticache_parameter_group: Retry resetting group on pending changes [GH-1821] -* resource/aws_kms_key: Retry getting rotation status [GH-1818] -* resource/aws_kms_key: Retry getting key policy [GH-1854] -* resource/aws_vpn_connection: Raise timeout to 40mins [GH-1819] -* resource/aws_kinesis_firehose_delivery_stream: Fix crash caused by missing `processing_configuration` [GH-1738] -* resource/aws_rds_cluster_instance: Treat `configuring-enhanced-monitoring` as pending state [GH-1744] -* resource/aws_rds_cluster_instance: Treat more states as pending [GH-1790] -* resource/aws_route_table: Increase number of not-found checks/retries after creation [GH-1791] -* resource/aws_batch_compute_environment: Fix ARN attribute name/value (`ecc_cluster_arn` -> `ecs_cluster_arn`) [GH-1809] -* resource/aws_kinesis_stream: Retry creation of the stream on `LimitExceededException` (handle throttling) [GH-1339] -* resource/aws_vpn_connection_route: Treat route in state `deleted` as deleted [GH-1848] -* resource/aws_eip: Avoid disassociating if there's no association [GH-1683] -* resource/aws_elasticache_cluster: Allow scaling up cluster by modifying `az_mode` (avoid recreation) [GH-1758] -* resource/aws_lambda_function: Fix Lambda Function Updates When Published [GH-1797] -* resource/aws_appautoscaling_*: Use dimension to uniquely identify target/policy [GH-1808] -* Updated the Data Source Tags structure [GH-1706] +* data-source/aws_db_instance: Make `db_instance_arn` expose ARN instead of identifier (use `db_cluster_identifier` for identifier) ([#1766](https://github.com/terraform-providers/terraform-provider-aws/issues/1766)) +* data-source/aws_db_snapshot: Expose `storage_type` (was not exposed) ([#1833](https://github.com/terraform-providers/terraform-provider-aws/issues/1833)) +* data-source/aws_ami: Update the `tags` structure for easier referencing ([#1706](https://github.com/terraform-providers/terraform-provider-aws/issues/1706)) +* data-source/aws_ebs_snapshot: Update the `tags` structure for easier referencing ([#1706](https://github.com/terraform-providers/terraform-provider-aws/issues/1706)) +* data-source/aws_ebs_volume: Update the `tags` structure for easier referencing ([#1706](https://github.com/terraform-providers/terraform-provider-aws/issues/1706)) +* data-source/aws_instance: Update the `tags` structure for easier referencing ([#1706](https://github.com/terraform-providers/terraform-provider-aws/issues/1706)) +* resource/aws_spot_instance_request: Handle `closed` request correctly ([#1903](https://github.com/terraform-providers/terraform-provider-aws/issues/1903)) +* resource/aws_cloudtrail: Raise update retry timeout ([#1820](https://github.com/terraform-providers/terraform-provider-aws/issues/1820)) +* resource/aws_elasticache_parameter_group: Retry resetting group on pending changes ([#1821](https://github.com/terraform-providers/terraform-provider-aws/issues/1821)) +* resource/aws_kms_key: Retry getting rotation status 
([#1818](https://github.com/terraform-providers/terraform-provider-aws/issues/1818)) +* resource/aws_kms_key: Retry getting key policy ([#1854](https://github.com/terraform-providers/terraform-provider-aws/issues/1854)) +* resource/aws_vpn_connection: Raise timeout to 40mins ([#1819](https://github.com/terraform-providers/terraform-provider-aws/issues/1819)) +* resource/aws_kinesis_firehose_delivery_stream: Fix crash caused by missing `processing_configuration` ([#1738](https://github.com/terraform-providers/terraform-provider-aws/issues/1738)) +* resource/aws_rds_cluster_instance: Treat `configuring-enhanced-monitoring` as pending state ([#1744](https://github.com/terraform-providers/terraform-provider-aws/issues/1744)) +* resource/aws_rds_cluster_instance: Treat more states as pending ([#1790](https://github.com/terraform-providers/terraform-provider-aws/issues/1790)) +* resource/aws_route_table: Increase number of not-found checks/retries after creation ([#1791](https://github.com/terraform-providers/terraform-provider-aws/issues/1791)) +* resource/aws_batch_compute_environment: Fix ARN attribute name/value (`ecc_cluster_arn` -> `ecs_cluster_arn`) ([#1809](https://github.com/terraform-providers/terraform-provider-aws/issues/1809)) +* resource/aws_kinesis_stream: Retry creation of the stream on `LimitExceededException` (handle throttling) ([#1339](https://github.com/terraform-providers/terraform-provider-aws/issues/1339)) +* resource/aws_vpn_connection_route: Treat route in state `deleted` as deleted ([#1848](https://github.com/terraform-providers/terraform-provider-aws/issues/1848)) +* resource/aws_eip: Avoid disassociating if there's no association ([#1683](https://github.com/terraform-providers/terraform-provider-aws/issues/1683)) +* resource/aws_elasticache_cluster: Allow scaling up cluster by modifying `az_mode` (avoid recreation) ([#1758](https://github.com/terraform-providers/terraform-provider-aws/issues/1758)) +* resource/aws_lambda_function: Fix Lambda Function Updates When Published ([#1797](https://github.com/terraform-providers/terraform-provider-aws/issues/1797)) +* resource/aws_appautoscaling_*: Use dimension to uniquely identify target/policy ([#1808](https://github.com/terraform-providers/terraform-provider-aws/issues/1808)) +* resource/aws_vpn_connection_route: Wait until route is available/deleted ([#1849](https://github.com/terraform-providers/terraform-provider-aws/issues/1849)) +* resource/aws_cloudfront_distribution: Ignore `minimum_protocol_version` if default certificate is used ([#1785](https://github.com/terraform-providers/terraform-provider-aws/issues/1785)) +* resource/aws_security_group: Using `self = false` with `cidr_blocks` should be allowed ([#1839](https://github.com/terraform-providers/terraform-provider-aws/issues/1839)) +* resource/aws_instance: Check VPC array size to avoid crashes on Eucalyptus Cloud ([#1882](https://github.com/terraform-providers/terraform-provider-aws/issues/1882)) ## 1.0.0 (September 27, 2017) diff --git a/README.md b/README.md index db331f3a7c1..e8947cc0e7e 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ $ make build Using the provider ---------------------- -## Fill in for each provider +If you're building the provider, follow the instructions to [install it as a plugin](https://www.terraform.io/docs/plugins/basics.html#installing-a-plugin). After placing it into your plugins directory, run `terraform init` to initialize it.
Developing the Provider --------------------------- @@ -50,6 +50,8 @@ $ $GOPATH/bin/terraform-provider-aws In order to test the provider, you can simply run `make test`. +*Note:* Make sure no `AWS_ACCESS_KEY_ID` or `AWS_SECRET_ACCESS_KEY` variables are set, and there's no `[default]` section in the AWS credentials file `~/.aws/credentials`. + ```sh $ make test ``` diff --git a/aws/awserr.go b/aws/awserr.go index b0ca7400943..fb12edd6a0a 100644 --- a/aws/awserr.go +++ b/aws/awserr.go @@ -31,3 +31,24 @@ func retryOnAwsCode(code string, f func() (interface{}, error)) (interface{}, er }) return resp, err } + +func retryOnAwsCodes(codes []string, f func() (interface{}, error)) (interface{}, error) { + var resp interface{} + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + var err error + resp, err = f() + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok { + for _, code := range codes { + if awsErr.Code() == code { + return resource.RetryableError(err) + } + } + } + return resource.NonRetryableError(err) + } + return nil + }) + return resp, err +} diff --git a/aws/cloudfront_distribution_configuration_structure.go b/aws/cloudfront_distribution_configuration_structure.go index a472176478e..64c440247b9 100644 --- a/aws/cloudfront_distribution_configuration_structure.go +++ b/aws/cloudfront_distribution_configuration_structure.go @@ -1096,8 +1096,12 @@ func viewerCertificateHash(v interface{}) int { } else { buf.WriteString(fmt.Sprintf("%t-", m["cloudfront_default_certificate"].(bool))) } - if v, ok := m["minimum_protocol_version"]; ok && v.(string) != "" { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + // if minimum_protocol_version is not specified and we use cloudfront_default_certificate, + // ignore current value of minimum_protocol_version + if c, ok := m["cloudfront_default_certificate"]; !(ok && c.(bool)) { + if v, ok := m["minimum_protocol_version"]; ok && v.(string) != "" { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } } return hashcode.String(buf.String()) } diff --git a/aws/config.go b/aws/config.go index 3e1aa9f3f0d..9fc0a088bcc 100644 --- a/aws/config.go +++ b/aws/config.go @@ -60,6 +60,7 @@ import ( "github.com/aws/aws-sdk-go/service/redshift" "github.com/aws/aws-sdk-go/service/route53" "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/servicecatalog" "github.com/aws/aws-sdk-go/service/ses" "github.com/aws/aws-sdk-go/service/sfn" "github.com/aws/aws-sdk-go/service/simpledb" @@ -144,6 +145,7 @@ type AWSClient struct { appautoscalingconn *applicationautoscaling.ApplicationAutoScaling autoscalingconn *autoscaling.AutoScaling s3conn *s3.S3 + scconn *servicecatalog.ServiceCatalog sesConn *ses.SES simpledbconn *simpledb.SimpleDB sqsconn *sqs.SQS @@ -379,6 +381,7 @@ func (c *Config) Client() (interface{}, error) { client.redshiftconn = redshift.New(sess) client.simpledbconn = simpledb.New(sess) client.s3conn = s3.New(awsS3Sess) + client.scconn = servicecatalog.New(sess) client.sesConn = ses.New(sess) client.sfnconn = sfn.New(sess) client.snsconn = sns.New(awsSnsSess) diff --git a/aws/data_source_aws_availability_zone.go b/aws/data_source_aws_availability_zone.go index edab7c92615..6fdf34281df 100644 --- a/aws/data_source_aws_availability_zone.go +++ b/aws/data_source_aws_availability_zone.go @@ -79,7 +79,6 @@ func dataSourceAwsAvailabilityZoneRead(d *schema.ResourceData, meta interface{}) nameSuffix := (*az.ZoneName)[len(*az.RegionName):] d.SetId(*az.ZoneName) - d.Set("id", az.ZoneName) d.Set("name", az.ZoneName) 
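// d.SetId above already records the zone name as the resource ID; the removed d.Set("id", ...) call was redundant once "id" left the schema (see #1626 in the changelog).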
d.Set("name_suffix", nameSuffix) d.Set("region", az.RegionName) diff --git a/aws/data_source_aws_canonical_user_id.go b/aws/data_source_aws_canonical_user_id.go index ba6a0b09884..0c8a89e390a 100644 --- a/aws/data_source_aws_canonical_user_id.go +++ b/aws/data_source_aws_canonical_user_id.go @@ -14,10 +14,6 @@ func dataSourceAwsCanonicalUserId() *schema.Resource { Read: dataSourceAwsCanonicalUserIdRead, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, "display_name": { Type: schema.TypeString, Computed: true, @@ -41,7 +37,6 @@ func dataSourceAwsCanonicalUserIdRead(d *schema.ResourceData, meta interface{}) } d.SetId(aws.StringValue(resp.Owner.ID)) - d.Set("id", resp.Owner.ID) d.Set("display_name", resp.Owner.DisplayName) return nil diff --git a/aws/data_source_aws_cloudtrail_service_account.go b/aws/data_source_aws_cloudtrail_service_account.go new file mode 100644 index 00000000000..e41b6fb3d93 --- /dev/null +++ b/aws/data_source_aws_cloudtrail_service_account.go @@ -0,0 +1,57 @@ +package aws + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +// See http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html +var cloudTrailServiceAccountPerRegionMap = map[string]string{ + "us-east-1": "086441151436", + "us-east-2": "475085895292", + "us-west-1": "388731089494", + "us-west-2": "113285607260", + "ap-south-1": "977081816279", + "ap-northeast-2": "492519147666", + "ap-southeast-1": "903692715234", + "ap-southeast-2": "284668455005", + "ap-northeast-1": "216624486486", + "ca-central-1": "819402241893", + "eu-central-1": "035351147821", + "eu-west-1": "859597730677", + "eu-west-2": "282025262664", + "sa-east-1": "814480443879", +} + +func dataSourceAwsCloudTrailServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsCloudTrailServiceAccountRead, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsCloudTrailServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + region := meta.(*AWSClient).region + if v, ok := d.GetOk("region"); ok { + region = v.(string) + } + + if accid, ok := cloudTrailServiceAccountPerRegionMap[region]; ok { + d.SetId(accid) + d.Set("arn", iamArnString(meta.(*AWSClient).partition, accid, "root")) + return nil + } + + return fmt.Errorf("Unknown region (%q)", region) +} diff --git a/aws/data_source_aws_cloudtrail_service_account_test.go b/aws/data_source_aws_cloudtrail_service_account_test.go new file mode 100644 index 00000000000..0e5cdd4bc4d --- /dev/null +++ b/aws/data_source_aws_cloudtrail_service_account_test.go @@ -0,0 +1,40 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSCloudTrailServiceAccount_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAwsCloudTrailServiceAccountConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_cloudtrail_service_account.main", "id", "113285607260"), + resource.TestCheckResourceAttr("data.aws_cloudtrail_service_account.main", "arn", "arn:aws:iam::113285607260:root"), + ), + }, + resource.TestStep{ + Config: testAccCheckAwsCloudTrailServiceAccountExplicitRegionConfig, + Check: 
resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_cloudtrail_service_account.regional", "id", "282025262664"), + resource.TestCheckResourceAttr("data.aws_cloudtrail_service_account.regional", "arn", "arn:aws:iam::282025262664:root"), + ), + }, + }, + }) +} + +const testAccCheckAwsCloudTrailServiceAccountConfig = ` +data "aws_cloudtrail_service_account" "main" { } +` + +const testAccCheckAwsCloudTrailServiceAccountExplicitRegionConfig = ` +data "aws_cloudtrail_service_account" "regional" { + region = "eu-west-2" +} +` diff --git a/aws/data_source_aws_dynamodb_table.go b/aws/data_source_aws_dynamodb_table.go new file mode 100644 index 00000000000..998d022eb5f --- /dev/null +++ b/aws/data_source_aws_dynamodb_table.go @@ -0,0 +1,195 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsDynamoDbTable() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsDynamoDbTableRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "attribute": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) + }, + }, + "global_secondary_index": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "write_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + "hash_key": { + Type: schema.TypeString, + Computed: true, + }, + "range_key": { + Type: schema.TypeString, + Computed: true, + }, + "projection_type": { + Type: schema.TypeString, + Computed: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "hash_key": { + Type: schema.TypeString, + Computed: true, + }, + "local_secondary_index": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "range_key": { + Type: schema.TypeString, + Computed: true, + }, + "projection_type": { + Type: schema.TypeString, + Computed: true, + }, + "non_key_attributes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) + }, + }, + "range_key": { + Type: schema.TypeString, + Computed: true, + }, + "read_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + "stream_arn": { + Type: schema.TypeString, + Computed: true, + }, + "stream_enabled": { + Type: schema.TypeBool, + Computed: true, + }, + "stream_label": { + Type: schema.TypeString, + Computed: true, + }, + 
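// stream_view_type is upper-cased via the StateFunc below so it always matches the canonical values DynamoDB returns (e.g. NEW_AND_OLD_IMAGES).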
"stream_view_type": { + Type: schema.TypeString, + Computed: true, + StateFunc: func(v interface{}) string { + value := v.(string) + return strings.ToUpper(value) + }, + }, + "tags": tagsSchemaComputed(), + "ttl": { + Type: schema.TypeSet, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attribute_name": { + Type: schema.TypeString, + Computed: true, + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "write_capacity": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error { + dynamodbconn := meta.(*AWSClient).dynamodbconn + + name := d.Get("name").(string) + log.Printf("[DEBUG] Loading data for DynamoDB table %q", name) + req := &dynamodb.DescribeTableInput{ + TableName: aws.String(name), + } + + result, err := dynamodbconn.DescribeTable(req) + + if err != nil { + return errwrap.Wrapf("Error retrieving DynamoDB table: {{err}}", err) + } + + d.SetId(*result.Table.TableName) + + return flattenAwsDynamoDbTableResource(d, meta, result.Table) +} diff --git a/aws/data_source_aws_dynamodb_table_test.go b/aws/data_source_aws_dynamodb_table_test.go new file mode 100644 index 00000000000..11fe6687379 --- /dev/null +++ b/aws/data_source_aws_dynamodb_table_test.go @@ -0,0 +1,79 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDataSourceAwsDynamoDbTable_basic(t *testing.T) { + tableName := fmt.Sprintf("testaccawsdynamodbtable-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsDynamoDbTableConfigBasic(tableName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "name", tableName), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "read_capacity", "20"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "write_capacity", "20"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "hash_key", "UserId"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "range_key", "GameTitle"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "attribute.#", "3"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "global_secondary_index.#", "1"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "tags.%", "2"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "tags.Name", "dynamodb-table-1"), + resource.TestCheckResourceAttr("data.aws_dynamodb_table.dynamodb_table_test", "tags.Environment", "test"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsDynamoDbTableConfigBasic(tableName string) string { + return fmt.Sprintf(`resource "aws_dynamodb_table" "dynamodb_table_test" { + name = "%s" + read_capacity = 20 + write_capacity = 20 + hash_key = "UserId" + range_key = "GameTitle" + + attribute { + name = "UserId" + type = "S" + } + + attribute { + name = "GameTitle" + type = "S" + } + + attribute { + name = "TopScore" + type = "N" + } + + global_secondary_index { + name = "GameTitleIndex" + hash_key = "GameTitle" + range_key 
= "TopScore" + write_capacity = 10 + read_capacity = 10 + projection_type = "INCLUDE" + non_key_attributes = ["UserId"] + } + + tags { + Name = "dynamodb-table-1" + Environment = "test" + } +} + +data "aws_dynamodb_table" "dynamodb_table_test" { + name = "${aws_dynamodb_table.dynamodb_table_test.name}" +}`, tableName) +} diff --git a/aws/data_source_aws_eip.go b/aws/data_source_aws_eip.go index 0352f48bf7a..fa6126e57a4 100644 --- a/aws/data_source_aws_eip.go +++ b/aws/data_source_aws_eip.go @@ -19,7 +19,6 @@ func dataSourceAwsEip() *schema.Resource { Optional: true, Computed: true, }, - "public_ip": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -34,7 +33,7 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { req := &ec2.DescribeAddressesInput{} - if id := d.Get("id"); id != "" { + if id, ok := d.GetOk("id"); ok { req.AllocationIds = []*string{aws.String(id.(string))} } @@ -57,7 +56,6 @@ func dataSourceAwsEipRead(d *schema.ResourceData, meta interface{}) error { eip := resp.Addresses[0] d.SetId(*eip.AllocationId) - d.Set("id", eip.AllocationId) d.Set("public_ip", eip.PublicIp) return nil diff --git a/aws/data_source_aws_eip_test.go b/aws/data_source_aws_eip_test.go index e19db792417..d5d0c6ac708 100644 --- a/aws/data_source_aws_eip_test.go +++ b/aws/data_source_aws_eip_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccDataSourceAwsEip(t *testing.T) { +func TestAccDataSourceAwsEip_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, diff --git a/aws/data_source_aws_iam_server_certificate.go b/aws/data_source_aws_iam_server_certificate.go index 854bc39b442..4be79462334 100644 --- a/aws/data_source_aws_iam_server_certificate.go +++ b/aws/data_source_aws_iam_server_certificate.go @@ -58,11 +58,6 @@ func dataSourceAwsIAMServerCertificate() *schema.Resource { Computed: true, }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - "path": { Type: schema.TypeString, Computed: true, @@ -130,7 +125,6 @@ func dataSourceAwsIAMServerCertificateRead(d *schema.ResourceData, meta interfac d.SetId(*metadata.ServerCertificateId) d.Set("arn", *metadata.Arn) d.Set("path", *metadata.Path) - d.Set("id", *metadata.ServerCertificateId) d.Set("name", *metadata.ServerCertificateName) if metadata.Expiration != nil { d.Set("expiration_date", metadata.Expiration.Format("2006-01-02T15:04:05")) diff --git a/aws/data_source_aws_nat_gateway.go b/aws/data_source_aws_nat_gateway.go new file mode 100644 index 00000000000..85297625240 --- /dev/null +++ b/aws/data_source_aws_nat_gateway.go @@ -0,0 +1,114 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsNatGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsNatGatewayRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tags": tagsSchemaComputed(), + + "filter": ec2CustomFiltersSchema(), + }, + } +} + +func dataSourceAwsNatGatewayRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + req := 
&ec2.DescribeNatGatewaysInput{} + + if id, ok := d.GetOk("id"); ok { + req.NatGatewayIds = aws.StringSlice([]string{id.(string)}) + } + + if vpc_id, ok := d.GetOk("vpc_id"); ok { + req.Filter = append(req.Filter, buildEC2AttributeFilterList( + map[string]string{ + "vpc-id": vpc_id.(string), + }, + )...) + } + + if state, ok := d.GetOk("state"); ok { + req.Filter = append(req.Filter, buildEC2AttributeFilterList( + map[string]string{ + "state": state.(string), + }, + )...) + } + + if subnet_id, ok := d.GetOk("subnet_id"); ok { + req.Filter = append(req.Filter, buildEC2AttributeFilterList( + map[string]string{ + "subnet-id": subnet_id.(string), + }, + )...) + } + + req.Filter = append(req.Filter, buildEC2CustomFilterList( + d.Get("filter").(*schema.Set), + )...) + if len(req.Filter) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. + req.Filter = nil + } + log.Printf("[DEBUG] Reading NAT Gateway: %s", req) + resp, err := conn.DescribeNatGateways(req) + if err != nil { + return err + } + if resp == nil || len(resp.NatGateways) == 0 { + return fmt.Errorf("no matching NAT gateway found: %#v", req) + } + if len(resp.NatGateways) > 1 { + return fmt.Errorf("multiple NAT gateways matched; use additional constraints to reduce matches to a single NAT gateway") + } + + ngw := resp.NatGateways[0] + + d.SetId(aws.StringValue(ngw.NatGatewayId)) + d.Set("state", ngw.State) + d.Set("subnet_id", ngw.SubnetId) + d.Set("vpc_id", ngw.VpcId) + + for _, address := range ngw.NatGatewayAddresses { + if *address.AllocationId != "" { + d.Set("allocation_id", address.AllocationId) + d.Set("network_interface_id", address.NetworkInterfaceId) + d.Set("private_ip", address.PrivateIp) + d.Set("public_ip", address.PublicIp) + break + } + } + + return nil +} diff --git a/aws/data_source_aws_nat_gateway_test.go b/aws/data_source_aws_nat_gateway_test.go new file mode 100644 index 00000000000..e1e516e288d --- /dev/null +++ b/aws/data_source_aws_nat_gateway_test.go @@ -0,0 +1,89 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDataSourceAwsNatGateway(t *testing.T) { + // This is used as a portion of CIDR network addresses. 
+ rInt := acctest.RandIntRange(4, 254) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDataSourceAwsNatGatewayConfig(rInt), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair( + "data.aws_nat_gateway.test_by_id", "id", + "aws_nat_gateway.test", "id"), + resource.TestCheckResourceAttrPair( + "data.aws_nat_gateway.test_by_subnet_id", "subnet_id", + "aws_nat_gateway.test", "subnet_id"), + resource.TestCheckResourceAttrSet("data.aws_nat_gateway.test_by_id", "state"), + resource.TestCheckNoResourceAttr("data.aws_nat_gateway.test_by_id", "attached_vpc_id"), + ), + }, + }, + }) +} + +func testAccDataSourceAwsNatGatewayConfig(rInt int) string { + return fmt.Sprintf(` +provider "aws" { + region = "us-west-2" +} + +resource "aws_vpc" "test" { + cidr_block = "172.%d.0.0/16" + tags { + Name = "terraform-testacc-nat-gateway-data-source-%d" + } +} + +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "172.%d.123.0/24" + availability_zone = "us-west-2a" + + tags { + Name = "terraform-testacc-nat-gateway-data-source-%d" + } +} + +# EIPs are not taggable +resource "aws_eip" "test" { + vpc = true +} + +# IGWs are required for an NGW to spin up; manual dependency +resource "aws_internet_gateway" "test" { + vpc_id = "${aws_vpc.test.id}" + tags { + Name = "terraform-testacc-nat-gateway-data-source-%d" + } +} + +# NGWs are not taggable, either +resource "aws_nat_gateway" "test" { + subnet_id = "${aws_subnet.test.id}" + allocation_id = "${aws_eip.test.id}" + + depends_on = ["aws_internet_gateway.test"] +} + +data "aws_nat_gateway" "test_by_id" { + id = "${aws_nat_gateway.test.id}" +} + +data "aws_nat_gateway" "test_by_subnet_id" { + subnet_id = "${aws_nat_gateway.test.subnet_id}" +} + +`, rInt, rInt, rInt, rInt, rInt) +} diff --git a/aws/data_source_aws_prefix_list.go b/aws/data_source_aws_prefix_list.go index 8bed8550678..876af586db3 100644 --- a/aws/data_source_aws_prefix_list.go +++ b/aws/data_source_aws_prefix_list.go @@ -23,11 +23,6 @@ func dataSourceAwsPrefixList() *schema.Resource { Optional: true, Computed: true, }, - // Computed values. 
- "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, "cidr_blocks": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -63,7 +58,6 @@ func dataSourceAwsPrefixListRead(d *schema.ResourceData, meta interface{}) error pl := resp.PrefixLists[0] d.SetId(*pl.PrefixListId) - d.Set("id", pl.PrefixListId) d.Set("name", pl.PrefixListName) cidrs := make([]string, len(pl.Cidrs)) diff --git a/aws/data_source_aws_region.go b/aws/data_source_aws_region.go index ed75f705610..6d2a21d1d6d 100644 --- a/aws/data_source_aws_region.go +++ b/aws/data_source_aws_region.go @@ -75,7 +75,6 @@ func dataSourceAwsRegionRead(d *schema.ResourceData, meta interface{}) error { region := resp.Regions[0] d.SetId(*region.RegionName) - d.Set("id", region.RegionName) d.Set("name", region.RegionName) d.Set("endpoint", region.Endpoint) d.Set("current", *region.RegionName == currentRegion) diff --git a/aws/data_source_aws_s3_bucket_object.go b/aws/data_source_aws_s3_bucket_object.go index 2eff5e6dab2..ac3ddc329f0 100644 --- a/aws/data_source_aws_s3_bucket_object.go +++ b/aws/data_source_aws_s3_bucket_object.go @@ -62,6 +62,10 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "forced_content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, "key": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -99,7 +103,6 @@ func dataSourceAwsS3BucketObject() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tags": tagsSchemaComputed(), }, } @@ -110,6 +113,7 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e bucket := d.Get("bucket").(string) key := d.Get("key").(string) + forcedContentType := d.Get("forced_content_type").(string) input := s3.HeadObjectInput{ Bucket: aws.String(bucket), @@ -139,6 +143,11 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e bucket+key, versionText) } + contentType := *out.ContentType + if forcedContentType == "" { + forcedContentType = contentType + } + log.Printf("[DEBUG] Received S3 object: %s", out) d.SetId(uniqueId) @@ -167,7 +176,7 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("storage_class", out.StorageClass) } - if isContentTypeAllowed(out.ContentType) { + if isContentTypeAllowed(&forcedContentType) { input := s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), diff --git a/aws/data_source_aws_s3_bucket_object_test.go b/aws/data_source_aws_s3_bucket_object_test.go index 8bfe6c73f2d..24466ae67df 100644 --- a/aws/data_source_aws_s3_bucket_object_test.go +++ b/aws/data_source_aws_s3_bucket_object_test.go @@ -80,6 +80,40 @@ func TestAccDataSourceAWSS3BucketObject_readableBody(t *testing.T) { }) } +func TestAccDataSourceAWSS3BucketObject_forcedContentType_readableBody(t *testing.T) { + rInt := acctest.RandInt() + resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_forcedContentType_readableBody(rInt) + + var rObj s3.GetObjectOutput + var dsObj s3.GetObjectOutput + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + PreventPostDestroyRefresh: true, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: resourceOnlyConf, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists("aws_s3_bucket_object.object", &rObj), + ), + }, + resource.TestStep{ + Config: conf, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckAwsS3ObjectDataSourceExists("data.aws_s3_bucket_object.obj", &dsObj), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_length", "120"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "content_type", "application/x-x509-ca-cert"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "etag", "877716107971cdd406981bbbe85c97f4"), + resource.TestMatchResourceAttr("data.aws_s3_bucket_object.obj", "last_modified", + regexp.MustCompile("^[a-zA-Z]{3}, [0-9]+ [a-zA-Z]+ [0-9]{4} [0-9:]+ [A-Z]+$")), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "body", "-----BEGIN CERTIFICATE-----\nbWFpbiBDb250cm9sIFZhbGlkYXRXDDEdMBsGA1UECxMUUG9zaXRpdmVTU0wgV2ls==\n-----END CERTIFICATE-----"), + ), + }, + }, + }) +} + func TestAccDataSourceAWSS3BucketObject_kmsEncrypted(t *testing.T) { rInt := acctest.RandInt() resourceOnlyConf, conf := testAccAWSDataSourceS3ObjectConfig_kmsEncrypted(rInt) @@ -238,6 +272,29 @@ data "aws_s3_bucket_object" "obj" { return resources, both } +func testAccAWSDataSourceS3ObjectConfig_forcedContentType_readableBody(randInt int) (string, string) { + resources := fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" +} +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "tf-testing-obj-%d-forced-readable" + content = "-----BEGIN CERTIFICATE-----\nbWFpbiBDb250cm9sIFZhbGlkYXRXDDEdMBsGA1UECxMUUG9zaXRpdmVTU0wgV2ls==\n-----END CERTIFICATE-----" + content_type = "application/x-x509-ca-cert" +} +`, randInt, randInt) + + both := fmt.Sprintf(`%s +data "aws_s3_bucket_object" "obj" { + bucket = "tf-object-test-bucket-%d" + key = "tf-testing-obj-%d-forced-readable" + forced_content_type = "text/text" +}`, resources, randInt, randInt) + + return resources, both +} + func testAccAWSDataSourceS3ObjectConfig_kmsEncrypted(randInt int) (string, string) { resources := fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { diff --git a/aws/data_source_aws_security_group.go b/aws/data_source_aws_security_group.go index 223d33823f1..ec659e4002b 100644 --- a/aws/data_source_aws_security_group.go +++ b/aws/data_source_aws_security_group.go @@ -38,6 +38,11 @@ func dataSourceAwsSecurityGroup() *schema.Resource { }, "tags": tagsSchemaComputed(), + + "description": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -46,7 +51,7 @@ func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) er conn := meta.(*AWSClient).ec2conn req := &ec2.DescribeSecurityGroupsInput{} - if id, idExists := d.GetOk("id"); idExists { + if id, ok := d.GetOk("id"); ok { req.GroupIds = []*string{aws.String(id.(string))} } @@ -82,7 +87,6 @@ func dataSourceAwsSecurityGroupRead(d *schema.ResourceData, meta interface{}) er sg := resp.SecurityGroups[0] d.SetId(*sg.GroupId) - d.Set("id", sg.VpcId) d.Set("name", sg.GroupName) d.Set("description", sg.Description) d.Set("vpc_id", sg.VpcId) diff --git a/aws/data_source_aws_security_group_test.go b/aws/data_source_aws_security_group_test.go index 6e1f1664a83..d619ccc9fda 100644 --- a/aws/data_source_aws_security_group_test.go +++ b/aws/data_source_aws_security_group_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccDataSourceAwsSecurityGroup(t *testing.T) { +func TestAccDataSourceAwsSecurityGroup_basic(t *testing.T) { rInt := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,6 +21,7 @@ func 
TestAccDataSourceAwsSecurityGroup(t *testing.T) { Config: testAccDataSourceAwsSecurityGroupConfig(rInt), Check: resource.ComposeTestCheckFunc( testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_id"), + resource.TestCheckResourceAttr("data.aws_security_group.by_id", "description", "sg description"), testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_tag"), testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_filter"), testAccDataSourceAwsSecurityGroupCheck("data.aws_security_group.by_name"), @@ -103,14 +104,11 @@ func testAccDataSourceAwsSecurityGroupCheckDefault(name string) resource.TestChe func testAccDataSourceAwsSecurityGroupConfig(rInt int) string { return fmt.Sprintf(` - provider "aws" { - region = "eu-west-1" - } resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags { - Name = "terraform-testacc-subnet-data-source" + Name = "terraform-testacc-security-group-data-source" } } @@ -121,6 +119,7 @@ func testAccDataSourceAwsSecurityGroupConfig(rInt int) string { Name = "tf-acctest" Seed = "%d" } + description = "sg description" } data "aws_security_group" "by_id" { diff --git a/aws/data_source_aws_ssm_parameter.go b/aws/data_source_aws_ssm_parameter.go index 388366686da..0de1a1ccaa8 100644 --- a/aws/data_source_aws_ssm_parameter.go +++ b/aws/data_source_aws_ssm_parameter.go @@ -34,11 +34,13 @@ func dataSourceAwsSsmParameter() *schema.Resource { func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { ssmconn := meta.(*AWSClient).ssmconn - log.Printf("[DEBUG] Reading SSM Parameter: %s", d.Id()) + name := d.Get("name").(string) + + log.Printf("[DEBUG] Reading SSM Parameter: %q", name) paramInput := &ssm.GetParametersInput{ Names: []*string{ - aws.String(d.Get("name").(string)), + aws.String(name), }, WithDecryption: aws.Bool(true), } @@ -50,7 +52,7 @@ func dataAwsSsmParameterRead(d *schema.ResourceData, meta interface{}) error { } if len(resp.InvalidParameters) > 0 { - return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", d.Get("name").(string)) + return fmt.Errorf("[ERROR] SSM Parameter %s is invalid", name) } param := resp.Parameters[0] diff --git a/aws/data_source_aws_subnet.go b/aws/data_source_aws_subnet.go index 188a09dd2af..70cb8fbdfdc 100644 --- a/aws/data_source_aws_subnet.go +++ b/aws/data_source_aws_subnet.go @@ -83,7 +83,7 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { req := &ec2.DescribeSubnetsInput{} - if id := d.Get("id"); id != "" { + if id, ok := d.GetOk("id"); ok { req.SubnetIds = []*string{aws.String(id.(string))} } @@ -139,7 +139,6 @@ func dataSourceAwsSubnetRead(d *schema.ResourceData, meta interface{}) error { subnet := resp.Subnets[0] d.SetId(*subnet.SubnetId) - d.Set("id", subnet.SubnetId) d.Set("vpc_id", subnet.VpcId) d.Set("availability_zone", subnet.AvailabilityZone) d.Set("cidr_block", subnet.CidrBlock) diff --git a/aws/data_source_aws_subnet_test.go b/aws/data_source_aws_subnet_test.go index 3c9c5ed6f36..f92e215ab5e 100644 --- a/aws/data_source_aws_subnet_test.go +++ b/aws/data_source_aws_subnet_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccDataSourceAwsSubnet(t *testing.T) { +func TestAccDataSourceAwsSubnet_basic(t *testing.T) { rInt := acctest.RandIntRange(0, 256) resource.Test(t, resource.TestCase{ @@ -31,7 +31,7 @@ func TestAccDataSourceAwsSubnet(t *testing.T) { }) } -func TestAccDataSourceAwsSubnetIpv6ByIpv6Filter(t *testing.T) { +func TestAccDataSourceAwsSubnet_ipv6ByIpv6Filter(t *testing.T) { rInt 
:= acctest.RandIntRange(0, 256) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -53,7 +53,7 @@ func TestAccDataSourceAwsSubnetIpv6ByIpv6Filter(t *testing.T) { }) } -func TestAccDataSourceAwsSubnetIpv6ByIpv6CidrBlock(t *testing.T) { +func TestAccDataSourceAwsSubnet_ipv6ByIpv6CidrBlock(t *testing.T) { rInt := acctest.RandIntRange(0, 256) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/aws/data_source_aws_vpc.go b/aws/data_source_aws_vpc.go index a4acbfc43bf..1b8852b1790 100644 --- a/aws/data_source_aws_vpc.go +++ b/aws/data_source_aws_vpc.go @@ -81,8 +81,13 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { req := &ec2.DescribeVpcsInput{} - if id := d.Get("id"); id != "" { - req.VpcIds = []*string{aws.String(id.(string))} + var id string + if cid, ok := d.GetOk("id"); ok { + id = cid.(string) + } + + if id != "" { + req.VpcIds = []*string{aws.String(id)} } // We specify "default" as boolean, but EC2 filters want @@ -129,7 +134,6 @@ func dataSourceAwsVpcRead(d *schema.ResourceData, meta interface{}) error { vpc := resp.Vpcs[0] d.SetId(*vpc.VpcId) - d.Set("id", vpc.VpcId) d.Set("cidr_block", vpc.CidrBlock) d.Set("dhcp_options_id", vpc.DhcpOptionsId) d.Set("instance_tenancy", vpc.InstanceTenancy) diff --git a/aws/data_source_aws_vpc_endpoint.go b/aws/data_source_aws_vpc_endpoint.go index 157a8c55ecf..0503fe43e7d 100644 --- a/aws/data_source_aws_vpc_endpoint.go +++ b/aws/data_source_aws_vpc_endpoint.go @@ -115,7 +115,6 @@ func dataSourceAwsVpcEndpointRead(d *schema.ResourceData, meta interface{}) erro } d.SetId(aws.StringValue(vpce.VpcEndpointId)) - d.Set("id", vpce.VpcEndpointId) d.Set("state", vpce.State) d.Set("vpc_id", vpce.VpcId) d.Set("service_name", vpce.ServiceName) diff --git a/aws/data_source_aws_vpc_endpoint_test.go b/aws/data_source_aws_vpc_endpoint_test.go index 51d1e959dec..b95af809641 100644 --- a/aws/data_source_aws_vpc_endpoint_test.go +++ b/aws/data_source_aws_vpc_endpoint_test.go @@ -25,6 +25,23 @@ func TestAccDataSourceAwsVpcEndpoint_basic(t *testing.T) { }) } +func TestAccDataSourceAwsVpcEndpoint_byId(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceAwsVpcEndpointConfigById, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceAwsVpcEndpointCheckExists("data.aws_vpc_endpoint.by_id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_endpoint.by_id", "prefix_list_id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_endpoint.by_id", "id"), + ), + }, + }, + }) +} + func TestAccDataSourceAwsVpcEndpoint_withRouteTable(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -70,10 +87,6 @@ func testAccDataSourceAwsVpcEndpointCheckExists(name string) resource.TestCheckF } const testAccDataSourceAwsVpcEndpointConfig = ` -provider "aws" { - region = "us-west-2" -} - resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -96,11 +109,26 @@ data "aws_vpc_endpoint" "s3" { } ` -const testAccDataSourceAwsVpcEndpointWithRouteTableConfig = ` -provider "aws" { - region = "us-west-2" +const testAccDataSourceAwsVpcEndpointConfigById = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" + + tags { + Name = "terraform-testacc-vpc-endpoint-data-source-foo" + } } +resource "aws_vpc_endpoint" "s3" { + vpc_id = "${aws_vpc.foo.id}" + service_name = "com.amazonaws.us-west-2.s3" +} + +data "aws_vpc_endpoint" 
"by_id" { + id = "${aws_vpc_endpoint.s3.id}" +} +` + +const testAccDataSourceAwsVpcEndpointWithRouteTableConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" diff --git a/aws/data_source_aws_vpc_peering_connection.go b/aws/data_source_aws_vpc_peering_connection.go index 8d800751f53..489a7262414 100644 --- a/aws/data_source_aws_vpc_peering_connection.go +++ b/aws/data_source_aws_vpc_peering_connection.go @@ -117,7 +117,6 @@ func dataSourceAwsVpcPeeringConnectionRead(d *schema.ResourceData, meta interfac pcx := resp.VpcPeeringConnections[0] d.SetId(aws.StringValue(pcx.VpcPeeringConnectionId)) - d.Set("id", pcx.VpcPeeringConnectionId) d.Set("status", pcx.Status.Code) d.Set("vpc_id", pcx.RequesterVpcInfo.VpcId) d.Set("owner_id", pcx.RequesterVpcInfo.OwnerId) diff --git a/aws/data_source_aws_vpc_peering_connection_test.go b/aws/data_source_aws_vpc_peering_connection_test.go index db96839b7bc..9d2b7c9a4bb 100644 --- a/aws/data_source_aws_vpc_peering_connection_test.go +++ b/aws/data_source_aws_vpc_peering_connection_test.go @@ -17,11 +17,23 @@ func TestAccDataSourceAwsVpcPeeringConnection_basic(t *testing.T) { Config: testAccDataSourceAwsVpcPeeringConnectionConfig, Check: resource.ComposeTestCheckFunc( testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_id", "cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_vpc_id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_vpc_id", "cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_vpc_id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_vpc_id", "cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_requester_cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_requester_cidr_block", "cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_accepter_cidr_block"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_accepter_cidr_block", "cidr_block"), testAccDataSourceAwsVpcPeeringConnectionCheck("data.aws_vpc_peering_connection.test_by_owner_ids"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "id"), + resource.TestCheckResourceAttrSet("data.aws_vpc_peering_connection.test_by_owner_ids", "cidr_block"), ), ExpectNonEmptyPlan: true, }, @@ -56,10 +68,6 @@ func testAccDataSourceAwsVpcPeeringConnectionCheck(name string) resource.TestChe } const testAccDataSourceAwsVpcPeeringConnectionConfig = ` -provider "aws" { - region = "us-west-2" -} - resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" diff --git a/aws/import_aws_security_group_test.go b/aws/import_aws_security_group_test.go index a57313ae52d..88720ec1bbd 100644 --- 
a/aws/import_aws_security_group_test.go +++ b/aws/import_aws_security_group_test.go @@ -10,9 +10,9 @@ import ( func TestAccAWSSecurityGroup_importBasic(t *testing.T) { checkFn := func(s []*terraform.InstanceState) error { - // Expect 3: group, 2 rules - if len(s) != 3 { - return fmt.Errorf("expected 3 states: %#v", s) + // Expect 2 states + if len(s) != 2 { + return fmt.Errorf("expected 2 states: %#v", s) } return nil @@ -28,9 +28,10 @@ func TestAccAWSSecurityGroup_importBasic(t *testing.T) { }, { - ResourceName: "aws_security_group.web", - ImportState: true, - ImportStateCheck: checkFn, + ResourceName: "aws_security_group.web", + ImportState: true, + ImportStateCheck: checkFn, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, }, }, }) @@ -75,9 +76,10 @@ func TestAccAWSSecurityGroup_importSelf(t *testing.T) { }, { - ResourceName: "aws_security_group.allow_all", - ImportState: true, - ImportStateVerify: true, + ResourceName: "aws_security_group.allow_all", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, }, }, }) @@ -94,9 +96,10 @@ func TestAccAWSSecurityGroup_importSourceSecurityGroup(t *testing.T) { }, { - ResourceName: "aws_security_group.test_group_1", - ImportState: true, - ImportStateVerify: true, + ResourceName: "aws_security_group.test_group_1", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"revoke_rules_on_delete"}, }, }, }) diff --git a/aws/opsworks_layers.go b/aws/opsworks_layers.go index c4bfeb6b2ac..42ba8628511 100644 --- a/aws/opsworks_layers.go +++ b/aws/opsworks_layers.go @@ -45,11 +45,6 @@ func (lt *opsworksLayerType) SchemaResource() *schema.Resource { resourceSchema := map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "auto_assign_elastic_ips": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -281,7 +276,7 @@ func (lt *opsworksLayerType) Read(d *schema.ResourceData, client *opsworks.OpsWo } layer := resp.Layers[0] - d.Set("id", layer.LayerId) + d.SetId(aws.StringValue(layer.LayerId)) d.Set("auto_assign_elastic_ips", layer.AutoAssignElasticIps) d.Set("auto_assign_public_ips", layer.AutoAssignPublicIps) d.Set("custom_instance_profile_arn", layer.CustomInstanceProfileArn) @@ -370,7 +365,6 @@ func (lt *opsworksLayerType) Create(d *schema.ResourceData, client *opsworks.Ops layerId := *resp.LayerId d.SetId(layerId) - d.Set("id", layerId) loadBalancer := aws.String(d.Get("elastic_load_balancer").(string)) if loadBalancer != nil && *loadBalancer != "" { diff --git a/aws/provider.go b/aws/provider.go index fea7bceb456..c0ad59f1747 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -160,28 +160,30 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "aws_acm_certificate": dataSourceAwsAcmCertificate(), - "aws_ami": dataSourceAwsAmi(), - "aws_ami_ids": dataSourceAwsAmiIds(), - "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), - "aws_availability_zone": dataSourceAwsAvailabilityZone(), - "aws_availability_zones": dataSourceAwsAvailabilityZones(), - "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), - "aws_caller_identity": dataSourceAwsCallerIdentity(), - "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), - "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), - "aws_db_instance": dataSourceAwsDbInstance(), - "aws_db_snapshot": dataSourceAwsDbSnapshot(), - "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), - 
"aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), - "aws_ebs_volume": dataSourceAwsEbsVolume(), - "aws_ecr_repository": dataSourceAwsEcrRepository(), - "aws_ecs_cluster": dataSourceAwsEcsCluster(), - "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), - "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), - "aws_efs_file_system": dataSourceAwsEfsFileSystem(), - "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), - "aws_eip": dataSourceAwsEip(), + "aws_acm_certificate": dataSourceAwsAcmCertificate(), + "aws_ami": dataSourceAwsAmi(), + "aws_ami_ids": dataSourceAwsAmiIds(), + "aws_autoscaling_groups": dataSourceAwsAutoscalingGroups(), + "aws_availability_zone": dataSourceAwsAvailabilityZone(), + "aws_availability_zones": dataSourceAwsAvailabilityZones(), + "aws_billing_service_account": dataSourceAwsBillingServiceAccount(), + "aws_caller_identity": dataSourceAwsCallerIdentity(), + "aws_canonical_user_id": dataSourceAwsCanonicalUserId(), + "aws_cloudformation_stack": dataSourceAwsCloudFormationStack(), + "aws_cloudtrail_service_account": dataSourceAwsCloudTrailServiceAccount(), + "aws_db_instance": dataSourceAwsDbInstance(), + "aws_db_snapshot": dataSourceAwsDbSnapshot(), + "aws_dynamodb_table": dataSourceAwsDynamoDbTable(), + "aws_ebs_snapshot": dataSourceAwsEbsSnapshot(), + "aws_ebs_snapshot_ids": dataSourceAwsEbsSnapshotIds(), + "aws_ebs_volume": dataSourceAwsEbsVolume(), + "aws_ecr_repository": dataSourceAwsEcrRepository(), + "aws_ecs_cluster": dataSourceAwsEcsCluster(), + "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), + "aws_ecs_task_definition": dataSourceAwsEcsTaskDefinition(), + "aws_efs_file_system": dataSourceAwsEfsFileSystem(), + "aws_efs_mount_target": dataSourceAwsEfsMountTarget(), + "aws_eip": dataSourceAwsEip(), "aws_elastic_beanstalk_solution_stack": dataSourceAwsElasticBeanstalkSolutionStack(), "aws_elasticache_cluster": dataSourceAwsElastiCacheCluster(), "aws_elb_hosted_zone_id": dataSourceAwsElbHostedZoneId(), @@ -200,6 +202,7 @@ func Provider() terraform.ResourceProvider { "aws_kms_alias": dataSourceAwsKmsAlias(), "aws_kms_ciphertext": dataSourceAwsKmsCiphertext(), "aws_kms_secret": dataSourceAwsKmsSecret(), + "aws_nat_gateway": dataSourceAwsNatGateway(), "aws_partition": dataSourceAwsPartition(), "aws_prefix_list": dataSourceAwsPrefixList(), "aws_redshift_service_account": dataSourceAwsRedshiftServiceAccount(), @@ -278,6 +281,7 @@ func Provider() terraform.ResourceProvider { "aws_config_configuration_recorder_status": resourceAwsConfigConfigurationRecorderStatus(), "aws_config_delivery_channel": resourceAwsConfigDeliveryChannel(), "aws_cognito_identity_pool": resourceAwsCognitoIdentityPool(), + "aws_cognito_identity_pool_roles_attachment": resourceAwsCognitoIdentityPoolRolesAttachment(), "aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(), "aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(), "aws_cloudwatch_dashboard": resourceAwsCloudWatchDashboard(), @@ -306,6 +310,7 @@ func Provider() terraform.ResourceProvider { "aws_dynamodb_table": resourceAwsDynamoDbTable(), "aws_ebs_snapshot": resourceAwsEbsSnapshot(), "aws_ebs_volume": resourceAwsEbsVolume(), + "aws_ecr_lifecycle_policy": resourceAwsEcrLifecyclePolicy(), "aws_ecr_repository": resourceAwsEcrRepository(), "aws_ecr_repository_policy": resourceAwsEcrRepositoryPolicy(), "aws_ecs_cluster": resourceAwsEcsCluster(), @@ -427,6 +432,7 @@ func Provider() terraform.ResourceProvider { "aws_route_table_association": 
resourceAwsRouteTableAssociation(), "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), "aws_ses_domain_identity": resourceAwsSesDomainIdentity(), + "aws_ses_domain_dkim": resourceAwsSesDomainDkim(), "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), @@ -440,6 +446,7 @@ func Provider() terraform.ResourceProvider { "aws_network_interface_sg_attachment": resourceAwsNetworkInterfaceSGAttachment(), "aws_default_security_group": resourceAwsDefaultSecurityGroup(), "aws_security_group_rule": resourceAwsSecurityGroupRule(), + "aws_servicecatalog_portfolio": resourceAwsServiceCatalogPortfolio(), "aws_simpledb_domain": resourceAwsSimpleDBDomain(), "aws_ssm_activation": resourceAwsSsmActivation(), "aws_ssm_association": resourceAwsSsmAssociation(), diff --git a/aws/resource_aws_alb_target_group_test.go b/aws/resource_aws_alb_target_group_test.go new file mode 100644 index 00000000000..966b1e1a8a9 --- /dev/null +++ b/aws/resource_aws_alb_target_group_test.go @@ -0,0 +1,789 @@ +package aws + +import ( + "errors" + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestALBTargetGroupCloudwatchSuffixFromARN(t *testing.T) { + cases := []struct { + name string + arn *string + suffix string + }{ + { + name: "valid suffix", + arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:targetgroup/my-targets/73e2d6bc24d8a067`), + suffix: `targetgroup/my-targets/73e2d6bc24d8a067`, + }, + { + name: "no suffix", + arn: aws.String(`arn:aws:elasticloadbalancing:us-east-1:123456:targetgroup`), + suffix: ``, + }, + { + name: "nil ARN", + arn: nil, + suffix: ``, + }, + } + + for _, tc := range cases { + actual := lbTargetGroupSuffixFromARN(tc.arn) + if actual != tc.suffix { + t.Fatalf("bad suffix: %q\nExpected: %s\n Got: %s", tc.name, tc.suffix, actual) + } + } +} + +func TestAccAWSALBTargetGroup_basic(t *testing.T) { + var conf elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "target_type", "instance"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.enabled", 
"true"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "60"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8081"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTP"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "3"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "3"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "3"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200-299"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.TestName", "TestAccAWSALBTargetGroup_basic"), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_namePrefix(t *testing.T) { + var conf elbv2.TargetGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_namePrefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestMatchResourceAttr("aws_alb_target_group.test", "name", regexp.MustCompile("^tf-")), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_generatedName(t *testing.T) { + var conf elbv2.TargetGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_generatedName, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_changeNameForceNew(t *testing.T) { + var before, after elbv2.TargetGroup + targetGroupNameBefore := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + targetGroupNameAfter := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(4, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupNameBefore), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupNameBefore), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupNameAfter), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", 
&after), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupNameAfter), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_changeProtocolForceNew(t *testing.T) { + var before, after elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_updatedProtocol(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTP"), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_changePortForceNew(t *testing.T) { + var before, after elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_updatedPort(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "442"), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_changeVpcForceNew(t *testing.T) { + var before, after elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &before), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_updatedVpc(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &after), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_tags(t *testing.T) { + var conf elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + 
Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.TestName", "TestAccAWSALBTargetGroup_basic"), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_updateTags(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.%", "2"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.Environment", "Production"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "tags.Type", "ALB Target Group"), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_updateHealthCheck(t *testing.T) { + var conf elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_basic(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "60"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8081"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTP"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "3"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "3"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "3"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200-299"), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_updateHealthCheck(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), + 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), + ), + }, + }, + }) +} + +func TestAccAWSALBTargetGroup_updateSticknessEnabled(t *testing.T) { + var conf elbv2.TargetGroup + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_alb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSALBTargetGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSALBTargetGroupConfig_stickiness(targetGroupName, false, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), + 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_stickiness(targetGroupName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccAWSALBTargetGroupConfig_stickiness(targetGroupName, true, false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSALBTargetGroupExists("aws_alb_target_group.test", &conf), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "arn"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "name", targetGroupName), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "port", "443"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet("aws_alb_target_group.test", "vpc_id"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "deregistration_delay", "200"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.enabled", "false"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.#", "1"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.interval", "30"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.port", "8082"), + 
resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr("aws_alb_target_group.test", "health_check.0.matcher", "200"), + ), + }, + }, + }) +} + +func testAccCheckAWSALBTargetGroupExists(n string, res *elbv2.TargetGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Target Group ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).elbv2conn + + describe, err := conn.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ + TargetGroupArns: []*string{aws.String(rs.Primary.ID)}, + }) + + if err != nil { + return err + } + + if len(describe.TargetGroups) != 1 || + *describe.TargetGroups[0].TargetGroupArn != rs.Primary.ID { + return errors.New("Target Group not found") + } + + *res = *describe.TargetGroups[0] + return nil + } +} + +func testAccCheckAWSALBTargetGroupDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).elbv2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_alb_target_group" { + continue + } + + describe, err := conn.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ + TargetGroupArns: []*string{aws.String(rs.Primary.ID)}, + }) + + if err == nil { + if len(describe.TargetGroups) != 0 && + *describe.TargetGroups[0].TargetGroupArn == rs.Primary.ID { + return fmt.Errorf("Target Group %q still exists", rs.Primary.ID) + } + } + + // Verify the error + if isTargetGroupNotFound(err) { + return nil + } else { + return errwrap.Wrapf("Unexpected error checking ALB destroyed: {{err}}", err) + } + } + + return nil +} + +func testAccAWSALBTargetGroupConfig_basic(targetGroupName string) string { + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + + deregistration_delay = 200 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +}`, targetGroupName) +} + +func testAccAWSALBTargetGroupConfig_updatedPort(targetGroupName string) string { + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 442 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + + deregistration_delay = 200 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +}`, targetGroupName) +} + +func testAccAWSALBTargetGroupConfig_updatedProtocol(targetGroupName string) 
string { + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTP" + vpc_id = "${aws_vpc.test2.id}" + + deregistration_delay = 200 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +} + +resource "aws_vpc" "test2" { + cidr_block = "10.10.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +}`, targetGroupName) +} + +func testAccAWSALBTargetGroupConfig_updatedVpc(targetGroupName string) string { + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + + deregistration_delay = 200 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +}`, targetGroupName) +} + +func testAccAWSALBTargetGroupConfig_updateTags(targetGroupName string) string { + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + + deregistration_delay = 200 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } + + tags { + Environment = "Production" + Type = "ALB Target Group" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +}`, targetGroupName) +} + +func testAccAWSALBTargetGroupConfig_updateHealthCheck(targetGroupName string) string { + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + + deregistration_delay = 200 + + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + + health_check { + path = "/health2" + interval = 30 + port = 8082 + protocol = "HTTPS" + timeout = 4 + healthy_threshold = 4 + unhealthy_threshold = 4 + matcher = "200" + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_basic" + } +}`, targetGroupName) +} + +func testAccAWSALBTargetGroupConfig_stickiness(targetGroupName string, addStickinessBlock bool, enabled bool) string { + var stickinessBlock string + + if addStickinessBlock { + stickinessBlock = fmt.Sprintf(`stickiness { + enabled = "%t" + type = "lb_cookie" + cookie_duration = 10000 + }`, enabled) + } + + return fmt.Sprintf(`resource "aws_alb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + + deregistration_delay = 200 + + %s + + health_check { + path = "/health2" + interval = 30 + port = 8082 + protocol = "HTTPS" + timeout = 4 + healthy_threshold = 4 + unhealthy_threshold = 4 + matcher = "200" + } +} + +resource "aws_vpc" 
"test" { + cidr_block = "10.0.0.0/16" + + tags { + TestName = "TestAccAWSALBTargetGroup_stickiness" + } +}`, targetGroupName, stickinessBlock) +} + +const testAccAWSALBTargetGroupConfig_namePrefix = ` +resource "aws_alb_target_group" "test" { + name_prefix = "tf-" + port = 80 + protocol = "HTTP" + vpc_id = "${aws_vpc.test.id}" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + tags { + Name = "testAccAWSALBTargetGroupConfig_namePrefix" + } +} +` + +const testAccAWSALBTargetGroupConfig_generatedName = ` +resource "aws_alb_target_group" "test" { + port = 80 + protocol = "HTTP" + vpc_id = "${aws_vpc.test.id}" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + tags { + Name = "testAccAWSALBTargetGroupConfig_generatedName" + } +} +` diff --git a/aws/resource_aws_ami.go b/aws/resource_aws_ami.go index 6ac2ec19172..7e89d4286d0 100644 --- a/aws/resource_aws_ami.go +++ b/aws/resource_aws_ami.go @@ -31,6 +31,12 @@ func resourceAwsAmi() *schema.Resource { return &schema.Resource{ Create: resourceAwsAmiCreate, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(AWSAMIRetryTimeout), + Update: schema.DefaultTimeout(AWSAMIRetryTimeout), + Delete: schema.DefaultTimeout(AWSAMIDeleteRetryTimeout), + }, + Schema: resourceSchema, // The Read, Update and Delete operations are shared with aws_ami_copy @@ -110,14 +116,12 @@ func resourceAwsAmiCreate(d *schema.ResourceData, meta interface{}) error { id := *res.ImageId d.SetId(id) - d.Partial(true) // make sure we record the id even if the rest of this gets interrupted - d.Set("id", id) + d.Partial(true) d.Set("manage_ebs_block_devices", false) - d.SetPartial("id") d.SetPartial("manage_ebs_block_devices") d.Partial(false) - _, err = resourceAwsAmiWaitForAvailable(id, client) + _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) if err != nil { return err } @@ -170,7 +174,7 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { // before we continue. We should never take this branch in normal // circumstances since we would've waited for availability during // the "Create" step. 
- image, err = resourceAwsAmiWaitForAvailable(id, client) + image, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) if err != nil { return err } @@ -303,7 +307,7 @@ func resourceAwsAmiDelete(d *schema.ResourceData, meta interface{}) error { } // Verify that the image is actually removed, if not we need to wait for it to be removed - if err := resourceAwsAmiWaitForDestroy(d.Id(), client); err != nil { + if err := resourceAwsAmiWaitForDestroy(d.Timeout(schema.TimeoutDelete), d.Id(), client); err != nil { return err } @@ -336,16 +340,16 @@ func AMIStateRefreshFunc(client *ec2.EC2, id string) resource.StateRefreshFunc { } } -func resourceAwsAmiWaitForDestroy(id string, client *ec2.EC2) error { +func resourceAwsAmiWaitForDestroy(timeout time.Duration, id string, client *ec2.EC2) error { log.Printf("Waiting for AMI %s to be deleted...", id) stateConf := &resource.StateChangeConf{ Pending: []string{"available", "pending", "failed"}, Target: []string{"destroyed"}, Refresh: AMIStateRefreshFunc(client, id), - Timeout: AWSAMIDeleteRetryTimeout, + Timeout: timeout, Delay: AWSAMIRetryDelay, - MinTimeout: AWSAMIRetryTimeout, + MinTimeout: AWSAMIRetryMinTimeout, } _, err := stateConf.WaitForState() @@ -356,14 +360,14 @@ func resourceAwsAmiWaitForDestroy(id string, client *ec2.EC2) error { return nil } -func resourceAwsAmiWaitForAvailable(id string, client *ec2.EC2) (*ec2.Image, error) { +func resourceAwsAmiWaitForAvailable(timeout time.Duration, id string, client *ec2.EC2) (*ec2.Image, error) { log.Printf("Waiting for AMI %s to become available...", id) stateConf := &resource.StateChangeConf{ Pending: []string{"pending"}, Target: []string{"available"}, Refresh: AMIStateRefreshFunc(client, id), - Timeout: AWSAMIRetryTimeout, + Timeout: timeout, Delay: AWSAMIRetryDelay, MinTimeout: AWSAMIRetryMinTimeout, } @@ -398,10 +402,6 @@ func resourceAwsAmiCommonSchema(computed bool) map[string]*schema.Schema { } return map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, "image_location": { Type: schema.TypeString, Optional: !computed, diff --git a/aws/resource_aws_ami_copy.go b/aws/resource_aws_ami_copy.go index 3452d5b5287..efd27146fac 100644 --- a/aws/resource_aws_ami_copy.go +++ b/aws/resource_aws_ami_copy.go @@ -42,6 +42,12 @@ func resourceAwsAmiCopy() *schema.Resource { return &schema.Resource{ Create: resourceAwsAmiCopyCreate, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(AWSAMIRetryTimeout), + Update: schema.DefaultTimeout(AWSAMIRetryTimeout), + Delete: schema.DefaultTimeout(AWSAMIDeleteRetryTimeout), + }, + Schema: resourceSchema, // The remaining operations are shared with the generic aws_ami resource, @@ -75,13 +81,11 @@ func resourceAwsAmiCopyCreate(d *schema.ResourceData, meta interface{}) error { id := *res.ImageId d.SetId(id) d.Partial(true) // make sure we record the id even if the rest of this gets interrupted - d.Set("id", id) d.Set("manage_ebs_snapshots", true) - d.SetPartial("id") d.SetPartial("manage_ebs_snapshots") d.Partial(false) - _, err = resourceAwsAmiWaitForAvailable(id, client) + _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) if err != nil { return err } diff --git a/aws/resource_aws_ami_from_instance.go b/aws/resource_aws_ami_from_instance.go index cc272d3c15d..0db9d336346 100644 --- a/aws/resource_aws_ami_from_instance.go +++ b/aws/resource_aws_ami_from_instance.go @@ -27,6 +27,12 @@ func resourceAwsAmiFromInstance() *schema.Resource { return &schema.Resource{ 
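// As with aws_ami and aws_ami_copy above, this resource declares explicit
// timeouts below. Its Read/Update/Delete operations are shared with the
// generic aws_ami resource, which is why the shared wait helpers now take
// the timeout as an argument instead of reading a package-level constant:
//
//   resourceAwsAmiWaitForAvailable(timeout time.Duration, id string, client *ec2.EC2) (*ec2.Image, error)
//   resourceAwsAmiWaitForDestroy(timeout time.Duration, id string, client *ec2.EC2) error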
Create: resourceAwsAmiFromInstanceCreate, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(AWSAMIRetryTimeout), + Update: schema.DefaultTimeout(AWSAMIRetryTimeout), + Delete: schema.DefaultTimeout(AWSAMIDeleteRetryTimeout), + }, + Schema: resourceSchema, // The remaining operations are shared with the generic aws_ami resource, @@ -55,13 +61,11 @@ func resourceAwsAmiFromInstanceCreate(d *schema.ResourceData, meta interface{}) id := *res.ImageId d.SetId(id) d.Partial(true) // make sure we record the id even if the rest of this gets interrupted - d.Set("id", id) d.Set("manage_ebs_snapshots", true) - d.SetPartial("id") d.SetPartial("manage_ebs_snapshots") d.Partial(false) - _, err = resourceAwsAmiWaitForAvailable(id, client) + _, err = resourceAwsAmiWaitForAvailable(d.Timeout(schema.TimeoutCreate), id, client) if err != nil { return err } diff --git a/aws/resource_aws_ami_launch_permission_test.go b/aws/resource_aws_ami_launch_permission_test.go index 4ccb35c7cb8..23de0c66f9d 100644 --- a/aws/resource_aws_ami_launch_permission_test.go +++ b/aws/resource_aws_ami_launch_permission_test.go @@ -117,7 +117,7 @@ func testAccAWSAMIDisappears(imageID *string) r.TestCheckFunc { return err } - if err := resourceAwsAmiWaitForDestroy(*imageID, conn); err != nil { + if err := resourceAwsAmiWaitForDestroy(AWSAMIDeleteRetryTimeout, *imageID, conn); err != nil { return err } return nil diff --git a/aws/resource_aws_api_gateway_usage_plan.go b/aws/resource_aws_api_gateway_usage_plan.go index 0d4930d08c0..9556278a293 100644 --- a/aws/resource_aws_api_gateway_usage_plan.go +++ b/aws/resource_aws_api_gateway_usage_plan.go @@ -92,7 +92,7 @@ func resourceAwsApiGatewayUsagePlan() *schema.Resource { }, "rate_limit": { - Type: schema.TypeInt, + Type: schema.TypeFloat, Default: 0, Optional: true, }, @@ -188,7 +188,7 @@ func resourceAwsApiGatewayUsagePlanCreate(d *schema.ResourceData, meta interface } if sv, ok := q["rate_limit"].(float64); ok { - ts.RateLimit = aws.Float64(float64(sv)) + ts.RateLimit = aws.Float64(sv) } params.Throttle = ts @@ -355,7 +355,7 @@ func resourceAwsApiGatewayUsagePlanUpdate(d *schema.ResourceData, meta interface operations = append(operations, &apigateway.PatchOperation{ Op: aws.String("replace"), Path: aws.String("/throttle/rateLimit"), - Value: aws.String(strconv.Itoa(d["rate_limit"].(int))), + Value: aws.String(strconv.FormatFloat(d["rate_limit"].(float64), 'f', -1, 64)), }) operations = append(operations, &apigateway.PatchOperation{ Op: aws.String("replace"), @@ -369,7 +369,7 @@ func resourceAwsApiGatewayUsagePlanUpdate(d *schema.ResourceData, meta interface operations = append(operations, &apigateway.PatchOperation{ Op: aws.String("add"), Path: aws.String("/throttle/rateLimit"), - Value: aws.String(strconv.Itoa(d["rate_limit"].(int))), + Value: aws.String(strconv.FormatFloat(d["rate_limit"].(float64), 'f', -1, 64)), }) operations = append(operations, &apigateway.PatchOperation{ Op: aws.String("add"), diff --git a/aws/resource_aws_api_gateway_usage_plan_test.go b/aws/resource_aws_api_gateway_usage_plan_test.go index 13d7afc2dbb..b0054db34cb 100644 --- a/aws/resource_aws_api_gateway_usage_plan_test.go +++ b/aws/resource_aws_api_gateway_usage_plan_test.go @@ -190,6 +190,27 @@ func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { }) } +// https://github.com/terraform-providers/terraform-provider-aws/issues/2057 +func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { + var conf apigateway.UsagePlan + name := acctest.RandString(10) + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), + resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"), + ), + }, + }, + }) +} + func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) diff --git a/aws/resource_aws_batch_job_queue.go b/aws/resource_aws_batch_job_queue.go index f4affbf03b5..761518c9de4 100644 --- a/aws/resource_aws_batch_job_queue.go +++ b/aws/resource_aws_batch_job_queue.go @@ -143,9 +143,9 @@ func resourceAwsBatchJobQueueDelete(d *schema.ResourceData, meta interface{}) er // Wait until the Job Queue is disabled before deleting stateConf := &resource.StateChangeConf{ - Pending: []string{batch.JQStateEnabled}, - Target: []string{batch.JQStateDisabled}, - Refresh: batchJobQueueRefreshStateFunc(conn, sn), + Pending: []string{batch.JQStatusUpdating}, + Target: []string{batch.JQStatusValid}, + Refresh: batchJobQueueRefreshStatusFunc(conn, sn), Timeout: 10 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, @@ -154,6 +154,7 @@ func resourceAwsBatchJobQueueDelete(d *schema.ResourceData, meta interface{}) er if err != nil { return err } + log.Printf("[DEBUG] Trying to delete Job Queue %s", sn) _, err = conn.DeleteJobQueue(&batch.DeleteJobQueueInput{ JobQueue: aws.String(sn), diff --git a/aws/resource_aws_cloudfront_distribution.go b/aws/resource_aws_cloudfront_distribution.go index c1bf53ec44c..eeda932850a 100644 --- a/aws/resource_aws_cloudfront_distribution.go +++ b/aws/resource_aws_cloudfront_distribution.go @@ -484,7 +484,7 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { "minimum_protocol_version": { Type: schema.TypeString, Optional: true, - Default: "SSLv3", + Default: "TLSv1", }, "ssl_support_method": { Type: schema.TypeString, diff --git a/aws/resource_aws_cloudwatch_log_metric_filter.go b/aws/resource_aws_cloudwatch_log_metric_filter.go index 943472f85ec..20ac887ae57 100644 --- a/aws/resource_aws_cloudwatch_log_metric_filter.go +++ b/aws/resource_aws_cloudwatch_log_metric_filter.go @@ -21,14 +21,14 @@ func resourceAwsCloudWatchLogMetricFilter() *schema.Resource { Delete: resourceAwsCloudWatchLogMetricFilterDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validateLogMetricFilterName, }, - "pattern": &schema.Schema{ + "pattern": { Type: schema.TypeString, Required: true, ValidateFunc: validateMaxLength(512), @@ -41,34 +41,38 @@ func resourceAwsCloudWatchLogMetricFilter() *schema.Resource { }, }, - "log_group_name": &schema.Schema{ + "log_group_name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validateLogGroupName, }, - "metric_transformation": &schema.Schema{ + "metric_transformation": { Type: schema.TypeList, Required: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ValidateFunc: validateLogMetricFilterTransformationName, }, - "namespace": &schema.Schema{ + "namespace": { Type: schema.TypeString, Required: true, ValidateFunc: 
validateLogMetricFilterTransformationName, }, - "value": &schema.Schema{ + "value": { Type: schema.TypeString, Required: true, ValidateFunc: validateMaxLength(100), }, + "default_value": { + Type: schema.TypeFloat, + Optional: true, + }, }, }, }, @@ -87,10 +91,14 @@ func resourceAwsCloudWatchLogMetricFilterUpdate(d *schema.ResourceData, meta int transformations := d.Get("metric_transformation").([]interface{}) o := transformations[0].(map[string]interface{}) - input.MetricTransformations = expandCloudWachLogMetricTransformations(o) + metricsTransformations, err := expandCloudWachLogMetricTransformations(o) + if err != nil { + return err + } + input.MetricTransformations = metricsTransformations log.Printf("[DEBUG] Creating/Updating CloudWatch Log Metric Filter: %s", input) - _, err := conn.PutMetricFilter(&input) + _, err = conn.PutMetricFilter(&input) if err != nil { return fmt.Errorf("Creating/Updating CloudWatch Log Metric Filter failed: %s", err) } diff --git a/aws/resource_aws_cloudwatch_log_metric_filter_test.go b/aws/resource_aws_cloudwatch_log_metric_filter_test.go index d34ea8dd041..8a955648d81 100644 --- a/aws/resource_aws_cloudwatch_log_metric_filter_test.go +++ b/aws/resource_aws_cloudwatch_log_metric_filter_test.go @@ -51,6 +51,7 @@ func TestAccAWSCloudWatchLogMetricFilter_basic(t *testing.T) { resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.name", "AccessDeniedCount"), resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.namespace", "MyNamespace"), resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.value", "2"), + resource.TestCheckResourceAttr("aws_cloudwatch_log_metric_filter.foobar", "metric_transformation.0.default_value", "1"), testAccCheckCloudWatchLogMetricFilterTransformation(&mf, &cloudwatchlogs.MetricTransformation{ MetricName: aws.String("AccessDeniedCount"), MetricNamespace: aws.String("MyNamespace"), @@ -181,6 +182,7 @@ PATTERN name = "AccessDeniedCount" namespace = "MyNamespace" value = "2" + default_value = "1" } } diff --git a/aws/resource_aws_cognito_identity_pool.go b/aws/resource_aws_cognito_identity_pool.go index b85472cf97f..a4629b399ac 100644 --- a/aws/resource_aws_cognito_identity_pool.go +++ b/aws/resource_aws_cognito_identity_pool.go @@ -155,28 +155,20 @@ func resourceAwsCognitoIdentityPoolRead(d *schema.ResourceData, meta interface{} d.Set("allow_unauthenticated_identities", ip.AllowUnauthenticatedIdentities) d.Set("developer_provider_name", ip.DeveloperProviderName) - if ip.CognitoIdentityProviders != nil { - if err := d.Set("cognito_identity_providers", flattenCognitoIdentityProviders(ip.CognitoIdentityProviders)); err != nil { - return fmt.Errorf("[DEBUG] Error setting cognito_identity_providers error: %#v", err) - } + if err := d.Set("cognito_identity_providers", flattenCognitoIdentityProviders(ip.CognitoIdentityProviders)); err != nil { + return fmt.Errorf("[DEBUG] Error setting cognito_identity_providers error: %#v", err) } - if ip.OpenIdConnectProviderARNs != nil { - if err := d.Set("openid_connect_provider_arns", flattenStringList(ip.OpenIdConnectProviderARNs)); err != nil { - return fmt.Errorf("[DEBUG] Error setting openid_connect_provider_arns error: %#v", err) - } + if err := d.Set("openid_connect_provider_arns", flattenStringList(ip.OpenIdConnectProviderARNs)); err != nil { + return fmt.Errorf("[DEBUG] Error setting openid_connect_provider_arns error: %#v", err) } - if ip.SamlProviderARNs != nil 
{ - if err := d.Set("saml_provider_arns", flattenStringList(ip.SamlProviderARNs)); err != nil { - return fmt.Errorf("[DEBUG] Error setting saml_provider_arns error: %#v", err) - } + if err := d.Set("saml_provider_arns", flattenStringList(ip.SamlProviderARNs)); err != nil { + return fmt.Errorf("[DEBUG] Error setting saml_provider_arns error: %#v", err) } - if ip.SupportedLoginProviders != nil { - if err := d.Set("supported_login_providers", flattenCognitoSupportedLoginProviders(ip.SupportedLoginProviders)); err != nil { - return fmt.Errorf("[DEBUG] Error setting supported_login_providers error: %#v", err) - } + if err := d.Set("supported_login_providers", flattenCognitoSupportedLoginProviders(ip.SupportedLoginProviders)); err != nil { + return fmt.Errorf("[DEBUG] Error setting supported_login_providers error: %#v", err) } return nil diff --git a/aws/resource_aws_cognito_identity_pool_roles_attachment.go b/aws/resource_aws_cognito_identity_pool_roles_attachment.go new file mode 100644 index 00000000000..5030331b961 --- /dev/null +++ b/aws/resource_aws_cognito_identity_pool_roles_attachment.go @@ -0,0 +1,285 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "bytes" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCognitoIdentityPoolRolesAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsCognitoIdentityPoolRolesAttachmentCreate, + Read: resourceAwsCognitoIdentityPoolRolesAttachmentRead, + Update: resourceAwsCognitoIdentityPoolRolesAttachmentUpdate, + Delete: resourceAwsCognitoIdentityPoolRolesAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "identity_pool_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "role_mapping": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "identity_provider": { + Type: schema.TypeString, + Required: true, + }, + "ambiguous_role_resolution": { + Type: schema.TypeString, + ValidateFunc: validateCognitoRoleMappingsAmbiguousRoleResolution, + Optional: true, // Required if Type equals Token or Rules. + }, + "mapping_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 25, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "claim": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateCognitoRoleMappingsRulesClaim, + }, + "match_type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateCognitoRoleMappingsRulesMatchType, + }, + "role_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateArn, + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateCognitoRoleMappingsRulesValue, + }, + }, + }, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateCognitoRoleMappingsType, + }, + }, + }, + }, + + "roles": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authenticated": { + Type: schema.TypeString, + ValidateFunc: validateArn, + Optional: true, // Required if unauthenticated isn't defined. + }, + "unauthenticated": { + Type: schema.TypeString, + ValidateFunc: validateArn, + Optional: true, // Required if authenticated isn't defined. 
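// Taken together, a minimal configuration for this new resource might
// look like the following (an illustrative sketch only; the pool and
// role references are placeholders, and at least one of the two role
// keys is required, per the comments above):
//
//   resource "aws_cognito_identity_pool_roles_attachment" "main" {
//     identity_pool_id = "${aws_cognito_identity_pool.main.id}"
//
//     roles {
//       "authenticated"   = "${aws_iam_role.authenticated.arn}"
//       "unauthenticated" = "${aws_iam_role.unauthenticated.arn}"
//     }
//   }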
+ }, + }, + }, + }, + }, + } +} + +func resourceAwsCognitoIdentityPoolRolesAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + + // Validates role keys to be either authenticated or unauthenticated, + // since ValidateFunc validates only the value not the key. + if errors := validateCognitoRoles(d.Get("roles").(map[string]interface{}), "roles"); len(errors) > 0 { + return fmt.Errorf("Error validating Roles: %v", errors) + } + + params := &cognitoidentity.SetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(d.Get("identity_pool_id").(string)), + Roles: expandCognitoIdentityPoolRoles(d.Get("roles").(map[string]interface{})), + } + + if v, ok := d.GetOk("role_mapping"); ok { + errors := validateRoleMappings(v.(*schema.Set).List()) + + if len(errors) > 0 { + return fmt.Errorf("Error validating ambiguous role resolution: %v", errors) + } + + params.RoleMappings = expandCognitoIdentityPoolRoleMappingsAttachment(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] Creating Cognito Identity Pool Roles Association: %#v", params) + _, err := conn.SetIdentityPoolRoles(params) + if err != nil { + return fmt.Errorf("Error creating Cognito Identity Pool Roles Association: %s", err) + } + + d.SetId(d.Get("identity_pool_id").(string)) + + return resourceAwsCognitoIdentityPoolRolesAttachmentRead(d, meta) +} + +func resourceAwsCognitoIdentityPoolRolesAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + log.Printf("[DEBUG] Reading Cognito Identity Pool Roles Association: %s", d.Id()) + + ip, err := conn.GetIdentityPoolRoles(&cognitoidentity.GetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(d.Get("identity_pool_id").(string)), + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { + log.Printf("[WARN] Cognito Identity Pool Roles Association %s not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return err + } + + if err := d.Set("roles", flattenCognitoIdentityPoolRoles(ip.Roles)); err != nil { + return fmt.Errorf("[DEBUG] Error setting roles error: %#v", err) + } + + if err := d.Set("role_mapping", flattenCognitoIdentityPoolRoleMappingsAttachment(ip.RoleMappings)); err != nil { + return fmt.Errorf("[DEBUG] Error setting role mappings error: %#v", err) + } + + return nil +} + +func resourceAwsCognitoIdentityPoolRolesAttachmentUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + + // Validates role keys to be either authenticated or unauthenticated, + // since ValidateFunc validates only the value not the key. 
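For context on the comment above: a helper/schema ValidateFunc has the signature func(interface{}, string) ([]string, []error); it receives a single value plus its flattened key and cannot inspect sibling attributes or the key set of a TypeMap, which is why this resource enforces cross-field and map-key rules inside the CRUD functions instead. A small illustrative validator, not taken from this PR:

package aws

import (
	"fmt"
	"strings"
)

// Illustrative only: flag values that do not look like ARNs.
func validateArnSketch(v interface{}, k string) (ws []string, errors []error) {
	value, ok := v.(string)
	if !ok || !strings.HasPrefix(value, "arn:") {
		errors = append(errors, fmt.Errorf("%q (%v) does not look like an ARN", k, v))
	}
	return
}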
+ if errors := validateCognitoRoles(d.Get("roles").(map[string]interface{}), "roles"); len(errors) > 0 { + return fmt.Errorf("Error validating Roles: %v", errors) + } + + params := &cognitoidentity.SetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(d.Get("identity_pool_id").(string)), + Roles: expandCognitoIdentityPoolRoles(d.Get("roles").(map[string]interface{})), + } + + if d.HasChange("role_mapping") { + v, ok := d.GetOk("role_mapping") + var mappings []interface{} + + if ok { + errors := validateRoleMappings(v.(*schema.Set).List()) + + if len(errors) > 0 { + return fmt.Errorf("Error validating ambiguous role resolution: %v", errors) + } + mappings = v.(*schema.Set).List() + } else { + mappings = []interface{}{} + } + + params.RoleMappings = expandCognitoIdentityPoolRoleMappingsAttachment(mappings) + } + + log.Printf("[DEBUG] Updating Cognito Identity Pool Roles Association: %#v", params) + _, err := conn.SetIdentityPoolRoles(params) + if err != nil { + return fmt.Errorf("Error updating Cognito Identity Pool Roles Association: %s", err) + } + + d.SetId(d.Get("identity_pool_id").(string)) + + return resourceAwsCognitoIdentityPoolRolesAttachmentRead(d, meta) +} + +func resourceAwsCognitoIdentityPoolRolesAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).cognitoconn + log.Printf("[DEBUG] Deleting Cognito Identity Pool Roles Association: %s", d.Id()) + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + _, err := conn.SetIdentityPoolRoles(&cognitoidentity.SetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(d.Get("identity_pool_id").(string)), + Roles: expandCognitoIdentityPoolRoles(make(map[string]interface{})), + RoleMappings: expandCognitoIdentityPoolRoleMappingsAttachment([]interface{}{}), + }) + + if err == nil { + return nil + } + + return resource.NonRetryableError(err) + }) +} + +// Validating that each role_mapping ambiguous_role_resolution +// is defined when "type" equals Token or Rules. +func validateRoleMappings(roleMappings []interface{}) []error { + errors := make([]error, 0) + + for _, r := range roleMappings { + rm := r.(map[string]interface{}) + + // If Type equals "Token" or "Rules", ambiguous_role_resolution must be defined. + // This should be removed as soon as we can have a ValidateFuncAgainst callable on the schema. + if err := validateCognitoRoleMappingsAmbiguousRoleResolutionAgainstType(rm); len(err) > 0 { + errors = append(errors, fmt.Errorf("Role Mapping %q: %v", rm["identity_provider"].(string), err)) + } + + // Validating that Rules Configuration is defined when Type equals Rules + // but not defined when Type equals Token. 
+ if err := validateCognitoRoleMappingsRulesConfiguration(rm); len(err) > 0 { + errors = append(errors, fmt.Errorf("Role Mapping %q: %v", rm["identity_provider"].(string), err)) + } + } + + return errors +} + +func cognitoRoleMappingHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["identity_provider"].(string))) + + return hashcode.String(buf.String()) +} + +func cognitoRoleMappingValueHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) + if d, ok := m["ambiguous_role_resolution"]; ok { + buf.WriteString(fmt.Sprintf("%s-", d.(string))) + } + + return hashcode.String(buf.String()) +} + +func cognitoRoleMappingRulesConfigurationHash(v interface{}) int { + var buf bytes.Buffer + for _, rule := range v.([]interface{}) { + r := rule.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", r["claim"].(string))) + buf.WriteString(fmt.Sprintf("%s-", r["match_type"].(string))) + buf.WriteString(fmt.Sprintf("%s-", r["role_arn"].(string))) + buf.WriteString(fmt.Sprintf("%s-", r["value"].(string))) + } + + return hashcode.String(buf.String()) +} diff --git a/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go b/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go new file mode 100644 index 00000000000..8d0a9ba5fee --- /dev/null +++ b/aws/resource_aws_cognito_identity_pool_roles_attachment_test.go @@ -0,0 +1,440 @@ +package aws + +import ( + "errors" + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/cognitoidentity" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSCognitoIdentityPoolRolesAttachment_basic(t *testing.T) { + name := fmt.Sprintf("%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + updatedName := fmt.Sprintf("%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists("aws_cognito_identity_pool_roles_attachment.main"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "identity_pool_id"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "roles.authenticated"), + ), + }, + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_basic(updatedName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists("aws_cognito_identity_pool_roles_attachment.main"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "identity_pool_id"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "roles.authenticated"), + ), + }, + }, + }) +} + +func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappings(t *testing.T) { + name := fmt.Sprintf("%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists("aws_cognito_identity_pool_roles_attachment.main"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "identity_pool_id"), + resource.TestCheckResourceAttr("aws_cognito_identity_pool_roles_attachment.main", "role_mapping.#", "0"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "roles.authenticated"), + ), + }, + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_roleMappings(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists("aws_cognito_identity_pool_roles_attachment.main"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "identity_pool_id"), + resource.TestCheckResourceAttr("aws_cognito_identity_pool_roles_attachment.main", "role_mapping.#", "1"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "roles.authenticated"), + ), + }, + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_roleMappingsUpdated(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists("aws_cognito_identity_pool_roles_attachment.main"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "identity_pool_id"), + resource.TestCheckResourceAttr("aws_cognito_identity_pool_roles_attachment.main", "role_mapping.#", "1"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "roles.authenticated"), + ), + }, + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists("aws_cognito_identity_pool_roles_attachment.main"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "identity_pool_id"), + resource.TestCheckResourceAttr("aws_cognito_identity_pool_roles_attachment.main", "role_mapping.#", "0"), + resource.TestCheckResourceAttrSet("aws_cognito_identity_pool_roles_attachment.main", "roles.authenticated"), + ), + }, + }, + }) +} + +func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithAmbiguousRoleResolutionError(t *testing.T) { + name := fmt.Sprintf("%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_roleMappingsWithAmbiguousRoleResolutionError(name), + ExpectError: regexp.MustCompile(`Error validating ambiguous role resolution`), + }, + }, + }) +} + +func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithRulesTypeError(t *testing.T) { + name := fmt.Sprintf("%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAWSCognitoIdentityPoolRolesAttachmentConfig_roleMappingsWithRulesTypeError(name), + ExpectError: regexp.MustCompile(`mapping_rule is required for Rules`), + }, + }, + }) +} + +func TestAccAWSCognitoIdentityPoolRolesAttachment_roleMappingsWithTokenTypeError(t *testing.T) { + name := fmt.Sprintf("%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSCognitoIdentityPoolRolesAttachmentConfig_roleMappingsWithTokenTypeError(name), + ExpectError: regexp.MustCompile(`mapping_rule must not be set for Token based role mapping`), + }, + }, + }) +} + +func testAccCheckAWSCognitoIdentityPoolRolesAttachmentExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Cognito Identity Pool Roles Attachment ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).cognitoconn + + _, err := conn.GetIdentityPoolRoles(&cognitoidentity.GetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(rs.Primary.Attributes["identity_pool_id"]), + }) + + if err != nil { + return err + } + + return nil + } +} + +func testAccCheckAWSCognitoIdentityPoolRolesAttachmentDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cognitoconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cognito_identity_pool_roles_attachment" { + continue + } + + _, err := conn.GetIdentityPoolRoles(&cognitoidentity.GetIdentityPoolRolesInput{ + IdentityPoolId: aws.String(rs.Primary.Attributes["identity_pool_id"]), + }) + + if err != nil { + if wserr, ok := err.(awserr.Error); ok && wserr.Code() == "ResourceNotFoundException" { + return nil + } + return err + } + } + + return nil +} + +func baseAWSCognitoIdentityPoolRolesAttachmentConfig(name string) string { + return fmt.Sprintf(` +resource "aws_cognito_identity_pool" "main" { + identity_pool_name = "identity pool %[1]s" + allow_unauthenticated_identities = false + + supported_login_providers { + "graph.facebook.com" = "7346241598935555" + } +} + +# Unauthenticated Role +resource "aws_iam_role" "unauthenticated" { + name = "cognito_unauthenticated_%[1]s" + + assume_role_policy = < 0 { + log.Printf("[DEBUG] Found %d ENIs to cleanup for NLB %q", niCount, lbArn) + return resource.RetryableError(fmt.Errorf("Waiting for %d ENIs of %q to clean up", niCount, lbArn)) + } + log.Printf("[DEBUG] ENIs gone for NLB %q", lbArn) + + return nil + }) +} + +func getLbNameFromArn(arn string) (string, error) { + re := regexp.MustCompile("([^/]+/[^/]+/[^/]+)$") + matches := re.FindStringSubmatch(arn) + if len(matches) != 2 { + return "", fmt.Errorf("Unexpected ARN format: %q", arn) + } + + // e.g. app/example-alb/b26e625cdde161e6 + return matches[1], nil +} + // flattenSubnetsFromAvailabilityZones creates a slice of strings containing the subnet IDs // for the ALB based on the AvailabilityZones structure returned by the API. 
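A quick, self-contained demonstration of the getLbNameFromArn expression above; the ARN value here is invented for illustration. (The flattenSubnetsFromAvailabilityZones helper whose doc comment appears above continues below.)

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as getLbNameFromArn: capture the trailing three path segments.
	re := regexp.MustCompile("([^/]+/[^/]+/[^/]+)$")
	arn := "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/example-nlb/50dc6c495c0c9188"
	fmt.Println(re.FindStringSubmatch(arn)[1]) // net/example-nlb/50dc6c495c0c9188
}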
func flattenSubnetsFromAvailabilityZones(availabilityZones []*elbv2.AvailabilityZone) []string { diff --git a/aws/resource_aws_lb_target_group.go b/aws/resource_aws_lb_target_group.go index 9cab96019fe..9f2e19807cc 100644 --- a/aws/resource_aws_lb_target_group.go +++ b/aws/resource_aws_lb_target_group.go @@ -79,6 +79,13 @@ func resourceAwsLbTargetGroup() *schema.Resource { ValidateFunc: validateAwsLbTargetGroupDeregistrationDelay, }, + "target_type": { + Type: schema.TypeString, + Optional: true, + Default: "instance", + ForceNew: true, + }, + "stickiness": { Type: schema.TypeList, Optional: true, @@ -191,10 +198,11 @@ func resourceAwsLbTargetGroupCreate(d *schema.ResourceData, meta interface{}) er } params := &elbv2.CreateTargetGroupInput{ - Name: aws.String(groupName), - Port: aws.Int64(int64(d.Get("port").(int))), - Protocol: aws.String(d.Get("protocol").(string)), - VpcId: aws.String(d.Get("vpc_id").(string)), + Name: aws.String(groupName), + Port: aws.Int64(int64(d.Get("port").(int))), + Protocol: aws.String(d.Get("protocol").(string)), + VpcId: aws.String(d.Get("vpc_id").(string)), + TargetType: aws.String(d.Get("target_type").(string)), } if healthChecks := d.Get("health_check").([]interface{}); len(healthChecks) == 1 { @@ -476,6 +484,7 @@ func flattenAwsLbTargetGroupResource(d *schema.ResourceData, meta interface{}, t d.Set("port", targetGroup.Port) d.Set("protocol", targetGroup.Protocol) d.Set("vpc_id", targetGroup.VpcId) + d.Set("target_type", targetGroup.TargetType) healthCheck := make(map[string]interface{}) healthCheck["interval"] = *targetGroup.HealthCheckIntervalSeconds diff --git a/aws/resource_aws_lb_target_group_attachment.go b/aws/resource_aws_lb_target_group_attachment.go index bfa54d8892c..74eeaa850f0 100644 --- a/aws/resource_aws_lb_target_group_attachment.go +++ b/aws/resource_aws_lb_target_group_attachment.go @@ -36,6 +36,12 @@ func resourceAwsLbTargetGroupAttachment() *schema.Resource { ForceNew: true, Optional: true, }, + + "availability_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, }, } } @@ -51,6 +57,10 @@ func resourceAwsLbAttachmentCreate(d *schema.ResourceData, meta interface{}) err target.Port = aws.Int64(int64(v.(int))) } + if v, ok := d.GetOk("availability_zone"); ok { + target.AvailabilityZone = aws.String(v.(string)) + } + params := &elbv2.RegisterTargetsInput{ TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), Targets: []*elbv2.TargetDescription{target}, @@ -80,6 +90,10 @@ func resourceAwsLbAttachmentDelete(d *schema.ResourceData, meta interface{}) err target.Port = aws.Int64(int64(v.(int))) } + if v, ok := d.GetOk("availability_zone"); ok { + target.AvailabilityZone = aws.String(v.(string)) + } + params := &elbv2.DeregisterTargetsInput{ TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), Targets: []*elbv2.TargetDescription{target}, @@ -108,6 +122,10 @@ func resourceAwsLbAttachmentRead(d *schema.ResourceData, meta interface{}) error target.Port = aws.Int64(int64(v.(int))) } + if v, ok := d.GetOk("availability_zone"); ok { + target.AvailabilityZone = aws.String(v.(string)) + } + resp, err := elbconn.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{ TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), Targets: []*elbv2.TargetDescription{target}, diff --git a/aws/resource_aws_lb_target_group_attachment_test.go b/aws/resource_aws_lb_target_group_attachment_test.go index 4617c55fd00..53723de31f0 100644 --- a/aws/resource_aws_lb_target_group_attachment_test.go +++ 
b/aws/resource_aws_lb_target_group_attachment_test.go @@ -71,6 +71,25 @@ func TestAccAWSLBTargetGroupAttachment_withoutPort(t *testing.T) { }) } +func TestAccAWSALBTargetGroupAttachment_ipAddress(t *testing.T) { + targetGroupName := fmt.Sprintf("test-target-group-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_lb_target_group.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLBTargetGroupAttachmentDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLBTargetGroupAttachmentConfigWithIpAddress(targetGroupName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSLBTargetGroupAttachmentExists("aws_lb_target_group_attachment.test"), + ), + }, + }, + }) +} + func testAccCheckAWSLBTargetGroupAttachmentExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -158,26 +177,21 @@ resource "aws_lb_target_group_attachment" "test" { target_group_arn = "${aws_lb_target_group.test.arn}" target_id = "${aws_instance.test.id}" } - resource "aws_instance" "test" { ami = "ami-f701cb97" instance_type = "t2.micro" subnet_id = "${aws_subnet.subnet.id}" } - resource "aws_lb_target_group" "test" { name = "%s" port = 443 protocol = "HTTPS" vpc_id = "${aws_vpc.test.id}" - deregistration_delay = 200 - stickiness { type = "lb_cookie" cookie_duration = 10000 } - health_check { path = "/health" interval = 60 @@ -189,13 +203,10 @@ resource "aws_lb_target_group" "test" { matcher = "200-299" } } - resource "aws_subnet" "subnet" { cidr_block = "10.0.1.0/24" vpc_id = "${aws_vpc.test.id}" - } - resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags { @@ -211,26 +222,21 @@ resource "aws_lb_target_group_attachment" "test" { target_id = "${aws_instance.test.id}" port = 80 } - resource "aws_instance" "test" { ami = "ami-f701cb97" instance_type = "t2.micro" subnet_id = "${aws_subnet.subnet.id}" } - resource "aws_lb_target_group" "test" { name = "%s" port = 443 protocol = "HTTPS" vpc_id = "${aws_vpc.test.id}" - deregistration_delay = 200 - stickiness { type = "lb_cookie" cookie_duration = 10000 } - health_check { path = "/health" interval = 60 @@ -242,13 +248,10 @@ resource "aws_lb_target_group" "test" { matcher = "200-299" } } - resource "aws_subnet" "subnet" { cidr_block = "10.0.1.0/24" vpc_id = "${aws_vpc.test.id}" - } - resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags { @@ -264,26 +267,21 @@ resource "aws_alb_target_group_attachment" "test" { target_id = "${aws_instance.test.id}" port = 80 } - resource "aws_instance" "test" { ami = "ami-f701cb97" instance_type = "t2.micro" subnet_id = "${aws_subnet.subnet.id}" } - resource "aws_alb_target_group" "test" { name = "%s" port = 443 protocol = "HTTPS" vpc_id = "${aws_vpc.test.id}" - deregistration_delay = 200 - stickiness { type = "lb_cookie" cookie_duration = 10000 } - health_check { path = "/health" interval = 60 @@ -295,13 +293,10 @@ resource "aws_alb_target_group" "test" { matcher = "200-299" } } - resource "aws_subnet" "subnet" { cidr_block = "10.0.1.0/24" vpc_id = "${aws_vpc.test.id}" - } - resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" tags { @@ -309,3 +304,49 @@ resource "aws_vpc" "test" { } }`, targetGroupName) } + +func testAccAWSLBTargetGroupAttachmentConfigWithIpAddress(targetGroupName string) string { + return fmt.Sprintf(` +resource "aws_lb_target_group_attachment" "test" { + target_group_arn = 
"${aws_lb_target_group.test.arn}" + target_id = "${aws_instance.test.private_ip}" + availability_zone = "${aws_instance.test.availability_zone}" +} +resource "aws_instance" "test" { + ami = "ami-f701cb97" + instance_type = "t2.micro" + subnet_id = "${aws_subnet.subnet.id}" +} +resource "aws_lb_target_group" "test" { + name = "%s" + port = 443 + protocol = "HTTPS" + vpc_id = "${aws_vpc.test.id}" + target_type = "ip" + deregistration_delay = 200 + stickiness { + type = "lb_cookie" + cookie_duration = 10000 + } + health_check { + path = "/health" + interval = 60 + port = 8081 + protocol = "HTTP" + timeout = 3 + healthy_threshold = 3 + unhealthy_threshold = 3 + matcher = "200-299" + } +} +resource "aws_subnet" "subnet" { + cidr_block = "10.0.1.0/24" + vpc_id = "${aws_vpc.test.id}" +} +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + tags { + Name = "testAccAWSALBTargetGroupAttachmentConfigWithoutPort" + } +}`, targetGroupName) +} diff --git a/aws/resource_aws_lb_test.go b/aws/resource_aws_lb_test.go index c568449b151..56333af09c1 100644 --- a/aws/resource_aws_lb_test.go +++ b/aws/resource_aws_lb_test.go @@ -109,6 +109,33 @@ func TestAccAWSLB_networkLoadbalancer(t *testing.T) { }) } +func TestAccAWSLB_networkLoadbalancerEIP(t *testing.T) { + var conf elbv2.LoadBalancer + lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLBDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSLBConfig_networkLoadBalancerEIP(lbName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSLBExists("aws_lb.test", &conf), + resource.TestCheckResourceAttr("aws_lb.test", "name", lbName), + resource.TestCheckResourceAttr("aws_lb.test", "internal", "false"), + resource.TestCheckResourceAttr("aws_lb.test", "ip_address_type", "ipv4"), + resource.TestCheckResourceAttrSet("aws_lb.test", "zone_id"), + resource.TestCheckResourceAttrSet("aws_lb.test", "dns_name"), + resource.TestCheckResourceAttrSet("aws_lb.test", "arn"), + resource.TestCheckResourceAttr("aws_lb.test", "load_balancer_type", "network"), + resource.TestCheckResourceAttr("aws_lb.test", "subnet_mapping.#", "2"), + ), + }, + }, + }) +} + func TestAccAWSLBBackwardsCompatibility(t *testing.T) { var conf elbv2.LoadBalancer lbName := fmt.Sprintf("testaccawslb-basic-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) @@ -830,6 +857,58 @@ resource "aws_subnet" "alb_test" { `, lbName) } +func testAccAWSLBConfig_networkLoadBalancerEIP(lbName string) string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" {} + +resource "aws_vpc" "main" { + cidr_block = "10.10.0.0/16" +} + +resource "aws_subnet" "public" { + count = "${length(data.aws_availability_zones.available.names)}" + availability_zone = "${data.aws_availability_zones.available.names[count.index]}" + cidr_block = "10.10.${count.index}.0/24" + vpc_id = "${aws_vpc.main.id}" +} + +resource "aws_internet_gateway" "default" { + vpc_id = "${aws_vpc.main.id}" +} + +resource "aws_route_table" "public" { + vpc_id = "${aws_vpc.main.id}" + route { + cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.default.id}" + } +} + +resource "aws_route_table_association" "a" { + count = "${length(data.aws_availability_zones.available.names)}" + subnet_id = "${aws_subnet.public.*.id[count.index]}" + route_table_id = "${aws_route_table.public.id}" +} + +resource 
"aws_lb" "test" { + name = "%s" + load_balancer_type = "network" + subnet_mapping { + subnet_id = "${aws_subnet.public.0.id}" + allocation_id = "${aws_eip.lb.0.id}" + } + subnet_mapping { + subnet_id = "${aws_subnet.public.1.id}" + allocation_id = "${aws_eip.lb.1.id}" + } +} + +resource "aws_eip" "lb" { + count = "${length(data.aws_availability_zones.available.names)}" +} +`, lbName) +} + func testAccAWSLBConfigBackwardsCompatibility(lbName string) string { return fmt.Sprintf(`resource "aws_alb" "lb_test" { name = "%s" diff --git a/aws/resource_aws_opsworks_application.go b/aws/resource_aws_opsworks_application.go index 7333018e5f6..9d3df69cde8 100644 --- a/aws/resource_aws_opsworks_application.go +++ b/aws/resource_aws_opsworks_application.go @@ -21,10 +21,6 @@ func resourceAwsOpsworksApplication() *schema.Resource { Update: resourceAwsOpsworksApplicationUpdate, Delete: resourceAwsOpsworksApplicationDelete, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, "name": { Type: schema.TypeString, Required: true, @@ -345,7 +341,6 @@ func resourceAwsOpsworksApplicationCreate(d *schema.ResourceData, meta interface appID := *resp.AppId d.SetId(appID) - d.Set("id", appID) return resourceAwsOpsworksApplicationRead(d, meta) } diff --git a/aws/resource_aws_opsworks_instance.go b/aws/resource_aws_opsworks_instance.go index 255487727e7..08770b35985 100644 --- a/aws/resource_aws_opsworks_instance.go +++ b/aws/resource_aws_opsworks_instance.go @@ -32,11 +32,6 @@ func resourceAwsOpsworksInstance() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "agent_version": { Type: schema.TypeString, Optional: true, @@ -564,7 +559,6 @@ func resourceAwsOpsworksInstanceRead(d *schema.ResourceData, meta interface{}) e d.Set("hostname", instance.Hostname) d.Set("infrastructure_class", instance.InfrastructureClass) d.Set("install_updates_on_boot", instance.InstallUpdatesOnBoot) - d.Set("id", instanceId) d.Set("instance_profile_arn", instance.InstanceProfileArn) d.Set("instance_type", instance.InstanceType) d.Set("last_service_error_id", instance.LastServiceErrorId) @@ -785,7 +779,6 @@ func resourceAwsOpsworksInstanceCreate(d *schema.ResourceData, meta interface{}) instanceId := *resp.InstanceId d.SetId(instanceId) - d.Set("id", instanceId) if v, ok := d.GetOk("state"); ok && v.(string) == "running" { err := startOpsworksInstance(d, meta, true, d.Timeout(schema.TimeoutCreate)) @@ -806,9 +799,9 @@ func resourceAwsOpsworksInstanceUpdate(d *schema.ResourceData, meta interface{}) } req := &opsworks.UpdateInstanceInput{ + InstanceId: aws.String(d.Id()), AgentVersion: aws.String(d.Get("agent_version").(string)), Architecture: aws.String(d.Get("architecture").(string)), - InstanceId: aws.String(d.Get("id").(string)), InstallUpdatesOnBoot: aws.Bool(d.Get("install_updates_on_boot").(bool)), } @@ -918,7 +911,7 @@ func resourceAwsOpsworksInstanceImport( func startOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool, timeout time.Duration) error { client := meta.(*AWSClient).opsworksconn - instanceId := d.Get("id").(string) + instanceId := d.Id() req := &opsworks.StartInstanceInput{ InstanceId: aws.String(instanceId), @@ -956,7 +949,7 @@ func startOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool, func stopOpsworksInstance(d *schema.ResourceData, meta interface{}, wait bool, timeout time.Duration) error { client := meta.(*AWSClient).opsworksconn - instanceId := d.Get("id").(string) + 
instanceId := d.Id() req := &opsworks.StopInstanceInput{ InstanceId: aws.String(instanceId), diff --git a/aws/resource_aws_opsworks_permission.go b/aws/resource_aws_opsworks_permission.go index f1292a2fe65..13757228d44 100644 --- a/aws/resource_aws_opsworks_permission.go +++ b/aws/resource_aws_opsworks_permission.go @@ -20,10 +20,6 @@ func resourceAwsOpsworksPermission() *schema.Resource { Read: resourceAwsOpsworksPermissionRead, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, "allow_ssh": { Type: schema.TypeBool, Computed: true, @@ -104,7 +100,6 @@ func resourceAwsOpsworksPermissionRead(d *schema.ResourceData, meta interface{}) if d.Get("user_arn").(string)+d.Get("stack_id").(string) == id { found = true d.SetId(id) - d.Set("id", id) d.Set("allow_ssh", permission.AllowSsh) d.Set("allow_sudo", permission.AllowSudo) d.Set("user_arn", permission.IamUserArn) diff --git a/aws/resource_aws_opsworks_rds_db_instance.go b/aws/resource_aws_opsworks_rds_db_instance.go index d1aee903016..ef0d61f70fe 100644 --- a/aws/resource_aws_opsworks_rds_db_instance.go +++ b/aws/resource_aws_opsworks_rds_db_instance.go @@ -20,10 +20,6 @@ func resourceAwsOpsworksRdsDbInstance() *schema.Resource { Read: resourceAwsOpsworksRdsDbInstanceRead, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, "stack_id": { Type: schema.TypeString, Required: true, @@ -154,7 +150,6 @@ func resourceAwsOpsworksRdsDbInstanceRead(d *schema.ResourceData, meta interface if fmt.Sprintf("%s%s", d.Get("rds_db_instance_arn").(string), d.Get("stack_id").(string)) == id { found = true d.SetId(id) - d.Set("id", id) d.Set("stack_id", instance.StackId) d.Set("rds_db_instance_arn", instance.RdsDbInstanceArn) d.Set("db_user", instance.DbUser) diff --git a/aws/resource_aws_opsworks_stack.go b/aws/resource_aws_opsworks_stack.go index f7f2283f114..a173da10a79 100644 --- a/aws/resource_aws_opsworks_stack.go +++ b/aws/resource_aws_opsworks_stack.go @@ -40,11 +40,6 @@ func resourceAwsOpsworksStack() *schema.Resource { Computed: true, }, - "id": { - Type: schema.TypeString, - Computed: true, - }, - "name": { Type: schema.TypeString, Required: true, @@ -476,7 +471,6 @@ func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) er stackId := *resp.StackId d.SetId(stackId) - d.Set("id", stackId) if inVpc && *req.UseOpsworksSecurityGroups { // For VPC-based stacks, OpsWorks asynchronously creates some default diff --git a/aws/resource_aws_opsworks_user_profile.go b/aws/resource_aws_opsworks_user_profile.go index 39670b29549..ce24f5b2fdc 100644 --- a/aws/resource_aws_opsworks_user_profile.go +++ b/aws/resource_aws_opsworks_user_profile.go @@ -18,11 +18,6 @@ func resourceAwsOpsworksUserProfile() *schema.Resource { Delete: resourceAwsOpsworksUserProfileDelete, Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - "user_arn": { Type: schema.TypeString, Required: true, diff --git a/aws/resource_aws_rds_cluster_test.go b/aws/resource_aws_rds_cluster_test.go index 51da8845220..35d99d37719 100644 --- a/aws/resource_aws_rds_cluster_test.go +++ b/aws/resource_aws_rds_cluster_test.go @@ -444,12 +444,18 @@ resource "aws_subnet" "a" { vpc_id = "${aws_vpc.test.id}" cidr_block = "10.0.0.0/24" availability_zone = "us-west-2a" + tags { + Name = "testAccAWSClusterConfig_namePrefix" + } } resource "aws_subnet" "b" { vpc_id = "${aws_vpc.test.id}" cidr_block = "10.0.1.0/24" availability_zone = "us-west-2b" + tags { + Name = 
"testAccAWSClusterConfig_namePrefix" + } } resource "aws_db_subnet_group" "test" { diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go index e5d88c3eaf4..d42025eddcf 100644 --- a/aws/resource_aws_s3_bucket.go +++ b/aws/resource_aws_s3_bucket.go @@ -652,19 +652,22 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { } else { var host string var path string + var query string parsedHostName, err := url.Parse(*v.HostName) if err == nil { host = parsedHostName.Host path = parsedHostName.Path + query = parsedHostName.RawQuery } else { host = *v.HostName path = "" } w["redirect_all_requests_to"] = (&url.URL{ - Host: host, - Path: path, - Scheme: *v.Protocol, + Host: host, + Path: path, + Scheme: *v.Protocol, + RawQuery: query, }).String() } } @@ -1227,6 +1230,10 @@ func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, websit if redirect.Path != "" { redirectHostBuf.WriteString(redirect.Path) } + if redirect.RawQuery != "" { + redirectHostBuf.WriteString("?") + redirectHostBuf.WriteString(redirect.RawQuery) + } websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)} } else { websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)} diff --git a/aws/resource_aws_s3_bucket_object.go b/aws/resource_aws_s3_bucket_object.go index fb2791ce4a1..4022f5bc8f4 100644 --- a/aws/resource_aws_s3_bucket_object.go +++ b/aws/resource_aws_s3_bucket_object.go @@ -60,6 +60,11 @@ func resourceAwsS3BucketObject() *schema.Resource { Optional: true, }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + }, + "content_type": { Type: schema.TypeString, Optional: true, @@ -177,6 +182,16 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput.ContentType = aws.String(v.(string)) } + if v, ok := d.GetOk("metadata"); ok { + meta := make(map[string]*string) + for key, value := range v.(map[string]interface{}) { + valueStr := fmt.Sprintf("%v", value) + meta[key] = &valueStr + } + + putInput.Metadata = meta + } + if v, ok := d.GetOk("content_encoding"); ok { putInput.ContentEncoding = aws.String(v.(string)) } @@ -258,6 +273,7 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err d.Set("content_encoding", resp.ContentEncoding) d.Set("content_language", resp.ContentLanguage) d.Set("content_type", resp.ContentType) + d.Set("metadata", pointersMapToStringList(resp.Metadata)) d.Set("version_id", resp.VersionId) d.Set("server_side_encryption", resp.ServerSideEncryption) d.Set("website_redirect", resp.WebsiteRedirectLocation) diff --git a/aws/resource_aws_s3_bucket_test.go b/aws/resource_aws_s3_bucket_test.go index 3ef58a86fba..fb581a318ef 100644 --- a/aws/resource_aws_s3_bucket_test.go +++ b/aws/resource_aws_s3_bucket_test.go @@ -338,7 +338,7 @@ func TestAccAWSS3Bucket_WebsiteRedirect(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"), testAccCheckAWSS3BucketWebsite( - "aws_s3_bucket.bucket", "", "", "", "hashicorp.com"), + "aws_s3_bucket.bucket", "", "", "", "hashicorp.com?my=query"), resource.TestCheckResourceAttr( "aws_s3_bucket.bucket", "website_endpoint", testAccWebsiteEndpoint(rInt)), ), @@ -348,7 +348,7 @@ func TestAccAWSS3Bucket_WebsiteRedirect(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"), 
testAccCheckAWSS3BucketWebsite( - "aws_s3_bucket.bucket", "", "", "https", "hashicorp.com"), + "aws_s3_bucket.bucket", "", "", "https", "hashicorp.com?my=query"), resource.TestCheckResourceAttr( "aws_s3_bucket.bucket", "website_endpoint", testAccWebsiteEndpoint(rInt)), ), @@ -1307,7 +1307,7 @@ resource "aws_s3_bucket" "bucket" { acl = "public-read" website { - redirect_all_requests_to = "hashicorp.com" + redirect_all_requests_to = "hashicorp.com?my=query" } } `, randInt) @@ -1320,7 +1320,7 @@ resource "aws_s3_bucket" "bucket" { acl = "public-read" website { - redirect_all_requests_to = "https://hashicorp.com" + redirect_all_requests_to = "https://hashicorp.com?my=query" } } `, randInt) diff --git a/aws/resource_aws_security_group.go b/aws/resource_aws_security_group.go index 4752634816e..9c8426e39ca 100644 --- a/aws/resource_aws_security_group.go +++ b/aws/resource_aws_security_group.go @@ -27,6 +27,9 @@ func resourceAwsSecurityGroup() *schema.Resource { State: resourceAwsSecurityGroupImportState, }, + SchemaVersion: 1, + MigrateState: resourceAwsSecurityGroupMigrateState, + Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -218,6 +221,12 @@ func resourceAwsSecurityGroup() *schema.Resource { }, "tags": tagsSchema(), + + "revoke_rules_on_delete": { + Type: schema.TypeBool, + Default: false, + Optional: true, + }, }, } } @@ -427,6 +436,13 @@ func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Failed to delete Lambda ENIs: %s", err) } + // conditionally revoke rules first before attempting to delete the group + if v := d.Get("revoke_rules_on_delete").(bool); v { + if err := forceRevokeSecurityGroupRules(conn, d); err != nil { + return err + } + } + return resource.Retry(5*time.Minute, func() *resource.RetryError { _, err := conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{ GroupId: aws.String(d.Id()), @@ -453,6 +469,52 @@ func resourceAwsSecurityGroupDelete(d *schema.ResourceData, meta interface{}) er }) } +// Revoke all ingress/egress rules that a Security Group has +func forceRevokeSecurityGroupRules(conn *ec2.EC2, d *schema.ResourceData) error { + sgRaw, _, err := SGStateRefreshFunc(conn, d.Id())() + if err != nil { + return err + } + if sgRaw == nil { + return nil + } + + group := sgRaw.(*ec2.SecurityGroup) + if len(group.IpPermissions) > 0 { + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: group.GroupId, + IpPermissions: group.IpPermissions, + } + if group.VpcId == nil || *group.VpcId == "" { + req.GroupId = nil + req.GroupName = group.GroupName + } + _, err = conn.RevokeSecurityGroupIngress(req) + + if err != nil { + return fmt.Errorf( + "Error revoking security group %s rules: %s", + *group.GroupId, err) + } + } + + if len(group.IpPermissionsEgress) > 0 { + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: group.GroupId, + IpPermissions: group.IpPermissionsEgress, + } + _, err = conn.RevokeSecurityGroupEgress(req) + + if err != nil { + return fmt.Errorf( + "Error revoking security group %s rules: %s", + *group.GroupId, err) + } + } + + return nil +} + func resourceAwsSecurityGroupRuleHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) diff --git a/aws/resource_aws_security_group_migrate.go b/aws/resource_aws_security_group_migrate.go new file mode 100644 index 00000000000..88357447f3c --- /dev/null +++ b/aws/resource_aws_security_group_migrate.go @@ -0,0 +1,34 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func 
resourceAwsSecurityGroupMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found AWS SecurityGroup State v0; migrating to v1") + return migrateAwsSecurityGroupStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateAwsSecurityGroupStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + if is.Empty() || is.Attributes == nil { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // set default for revoke_rules_on_delete + is.Attributes["revoke_rules_on_delete"] = "false" + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/aws/resource_aws_security_group_migrate_test.go b/aws/resource_aws_security_group_migrate_test.go new file mode 100644 index 00000000000..3b14edb0e5e --- /dev/null +++ b/aws/resource_aws_security_group_migrate_test.go @@ -0,0 +1,71 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestAWSSecurityGroupMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "v0": { + StateVersion: 0, + Attributes: map[string]string{ + "name": "test", + }, + Expected: map[string]string{ + "name": "test", + "revoke_rules_on_delete": "false", + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "i-abc123", + Attributes: tc.Attributes, + } + is, err := resourceAwsSecurityGroupMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestAWSSecurityGroupMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta interface{} + + // should handle nil + is, err := resourceAwsSecurityGroupMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceAwsSecurityGroupMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/aws/resource_aws_security_group_rule.go b/aws/resource_aws_security_group_rule.go index e3f6760129c..d3ec4bc86ba 100644 --- a/aws/resource_aws_security_group_rule.go +++ b/aws/resource_aws_security_group_rule.go @@ -97,11 +97,10 @@ func resourceAwsSecurityGroupRule() *schema.Resource { }, "self": { - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - ConflictsWith: []string{"cidr_blocks"}, + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, }, "description": { @@ -732,10 +731,14 @@ func setDescriptionFromIPPerm(d *schema.ResourceData, rule *ec2.IpPermission) { // Validates that either 'cidr_blocks', 'ipv6_cidr_blocks', 'self', or 'source_security_group_id' is set func validateAwsSecurityGroupRule(d *schema.ResourceData) error { - _, blocksOk := d.GetOk("cidr_blocks") + blocks, blocksOk := d.GetOk("cidr_blocks") + self, selfOk := d.GetOk("self") + if blocksOk && self.(bool) { + return fmt.Errorf("'self': 
conflicts with 'cidr_blocks' (%#v)", blocks) + } + _, ipv6Ok := d.GetOk("ipv6_cidr_blocks") _, sourceOk := d.GetOk("source_security_group_id") - _, selfOk := d.GetOk("self") _, prefixOk := d.GetOk("prefix_list_ids") if !blocksOk && !sourceOk && !selfOk && !prefixOk && !ipv6Ok { return fmt.Errorf( diff --git a/aws/resource_aws_security_group_test.go b/aws/resource_aws_security_group_test.go index e251f7ddc4c..5de843f5812 100644 --- a/aws/resource_aws_security_group_test.go +++ b/aws/resource_aws_security_group_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "log" "reflect" "regexp" "strings" @@ -16,6 +17,80 @@ import ( "github.com/hashicorp/terraform/terraform" ) +// add sweeper to delete known test sgs +func init() { + resource.AddTestSweepers("aws_security_group", &resource.Sweeper{ + Name: "aws_security_group", + F: testSweepSecurityGroups, + }) +} + +func testSweepSecurityGroups(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).ec2conn + + req := &ec2.DescribeSecurityGroupsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("tag-value"), + Values: []*string{aws.String("tf-acc-revoke*")}, + }, + }, + } + resp, err := conn.DescribeSecurityGroups(req) + if err != nil { + return fmt.Errorf("Error describing Security Groups for sweeping: %s", err) + } + + if len(resp.SecurityGroups) == 0 { + log.Print("[DEBUG] No aws security groups to sweep") + return nil + } + + for _, sg := range resp.SecurityGroups { + // revoke the rules + if sg.IpPermissions != nil { + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: sg.GroupId, + IpPermissions: sg.IpPermissions, + } + + if _, err = conn.RevokeSecurityGroupIngress(req); err != nil { + return fmt.Errorf( + "Error revoking ingress rules for Security Group (%s): %s", + *sg.GroupId, err) + } + } + + if sg.IpPermissionsEgress != nil { + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: sg.GroupId, + IpPermissions: sg.IpPermissionsEgress, + } + + if _, err = conn.RevokeSecurityGroupEgress(req); err != nil { + return fmt.Errorf( + "Error revoking egress rules for Security Group (%s): %s", + *sg.GroupId, err) + } + } + } + + for _, sg := range resp.SecurityGroups { + // delete the group + _, err := conn.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{ + GroupId: sg.GroupId, + }) + if err != nil { + return fmt.Errorf( + "Error deleting Security Group (%s): %s", + *sg.GroupId, err) + } + } + + return nil +} + func TestProtocolStateFunc(t *testing.T) { cases := []struct { input interface{} @@ -296,6 +371,240 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) { }) } + +// cycleIpPermForGroup returns an IpPermission struct with a configured +// UserIdGroupPair for the groupid given. Used in +// TestAccAWSSecurityGroup_forceRevokeRules_should_fail to create a cyclic rule +// between 2 security groups +func cycleIpPermForGroup(groupId string) *ec2.IpPermission { + var perm ec2.IpPermission + perm.FromPort = aws.Int64(0) + perm.ToPort = aws.Int64(0) + perm.IpProtocol = aws.String("icmp") + perm.UserIdGroupPairs = make([]*ec2.UserIdGroupPair, 1) + perm.UserIdGroupPairs[0] = &ec2.UserIdGroupPair{ + GroupId: aws.String(groupId), + } + return &perm +} + +// testAddRuleCycle returns a TestCheckFunc to use at the end of a test, such +// that a Security Group Rule cyclic dependency will be created between the two +// Security Groups. A companion function, testRemoveRuleCycle, will undo this.
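The cycle these helpers build matters because EC2 refuses to delete a security group that is still referenced by another group's rules, returning a DependencyViolation error; the test steps below assert on exactly that code via ExpectError, and revoke_rules_on_delete exists to break such cycles at delete time. A small helper one could write to recognize the error (not part of this PR):

package aws

import "github.com/aws/aws-sdk-go/aws/awserr"

// isDependencyViolation reports whether err carries EC2's DependencyViolation code.
func isDependencyViolation(err error) bool {
	awsErr, ok := err.(awserr.Error)
	return ok && awsErr.Code() == "DependencyViolation"
}

The testAddRuleCycle helper described above follows.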
+func testAddRuleCycle(primary, secondary *ec2.SecurityGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + if primary.GroupId == nil { + return fmt.Errorf("Primary SG not set for TestAccAWSSecurityGroup_forceRevokeRules_should_fail") + } + if secondary.GroupId == nil { + return fmt.Errorf("Secondary SG not set for TestAccAWSSecurityGroup_forceRevokeRules_should_fail") + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + // cycle from primary to secondary + perm1 := cycleIpPermForGroup(*secondary.GroupId) + // cycle from secondary to primary + perm2 := cycleIpPermForGroup(*primary.GroupId) + + req1 := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: primary.GroupId, + IpPermissions: []*ec2.IpPermission{perm1}, + } + req2 := &ec2.AuthorizeSecurityGroupEgressInput{ + GroupId: secondary.GroupId, + IpPermissions: []*ec2.IpPermission{perm2}, + } + + var err error + _, err = conn.AuthorizeSecurityGroupEgress(req1) + if err != nil { + return fmt.Errorf( + "Error authorizing primary security group %s rules: %s", *primary.GroupId, + err) + } + _, err = conn.AuthorizeSecurityGroupEgress(req2) + if err != nil { + return fmt.Errorf( + "Error authorizing secondary security group %s rules: %s", *secondary.GroupId, + err) + } + return nil + } +} + +// testRemoveRuleCycle removes the cyclic dependency between two security groups +// that was added in testAddRuleCycle +func testRemoveRuleCycle(primary, secondary *ec2.SecurityGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + if primary.GroupId == nil { + return fmt.Errorf("Primary SG not set for TestAccAWSSecurityGroup_forceRevokeRules_should_fail") + } + if secondary.GroupId == nil { + return fmt.Errorf("Secondary SG not set for TestAccAWSSecurityGroup_forceRevokeRules_should_fail") + } + + conn := testAccProvider.Meta().(*AWSClient).ec2conn + for _, sg := range []*ec2.SecurityGroup{primary, secondary} { + var err error + if sg.IpPermissions != nil { + req := &ec2.RevokeSecurityGroupIngressInput{ + GroupId: sg.GroupId, + IpPermissions: sg.IpPermissions, + } + + if _, err = conn.RevokeSecurityGroupIngress(req); err != nil { + return fmt.Errorf( + "Error revoking ingress rules for Security Group in testRemoveRuleCycle (%s): %s", + *sg.GroupId, err) + } + } + + if sg.IpPermissionsEgress != nil { + req := &ec2.RevokeSecurityGroupEgressInput{ + GroupId: sg.GroupId, + IpPermissions: sg.IpPermissionsEgress, + } + + if _, err = conn.RevokeSecurityGroupEgress(req); err != nil { + return fmt.Errorf( + "Error revoking egress rules for Security Group in testRemoveRuleCycle (%s): %s", + *sg.GroupId, err) + } + } + } + return nil + } +} + +// This test should fail to destroy the Security Groups and VPC, due to a +// dependency cycle added outside of Terraform's management. There are sweepers +// for 'aws_vpc' and 'aws_security_group' that clean these up; however, the test +// is written to let Terraform clean up after itself, because we go back and +// revoke the cyclic rules that were added. +func TestAccAWSSecurityGroup_forceRevokeRules_true(t *testing.T) { + var primary ec2.SecurityGroup + var secondary ec2.SecurityGroup + + // Add rules to create a cycle between primary and secondary.
This prevents + // Terraform/AWS from being able to destroy the groups + testAddCycle := testAddRuleCycle(&primary, &secondary) + // Remove the rules that created the cycle; Terraform/AWS can now destroy them + testRemoveCycle := testRemoveRuleCycle(&primary, &secondary) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupDestroy, + Steps: []resource.TestStep{ + // create the configuration with 2 security groups, then create a + // dependency cycle such that they cannot be deleted + { + Config: testAccAWSSecurityGroupConfig_revoke_base, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists("aws_security_group.primary", &primary), + testAccCheckAWSSecurityGroupExists("aws_security_group.secondary", &secondary), + testAddCycle, + ), + }, + // Verify the DependencyViolation error by using a configuration with the + // groups removed. Terraform tries to destroy them but cannot. Expect a + // DependencyViolation error + { + Config: testAccAWSSecurityGroupConfig_revoke_base_removed, + ExpectError: regexp.MustCompile("DependencyViolation"), + }, + // Restore the config (a no-op plan) but also remove the dependencies + // between the groups with testRemoveCycle + { + Config: testAccAWSSecurityGroupConfig_revoke_base, + // ExpectError: regexp.MustCompile("DependencyViolation"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists("aws_security_group.primary", &primary), + testAccCheckAWSSecurityGroupExists("aws_security_group.secondary", &secondary), + testRemoveCycle, + ), + }, + // Again try to apply the config with the sgs removed; it should work + { + Config: testAccAWSSecurityGroupConfig_revoke_base_removed, + }, + //// + // now test with revoke_rules_on_delete + //// + // create the configuration with 2 security groups, then create a + // dependency cycle such that they cannot be deleted. In this + // configuration, each Security Group has `revoke_rules_on_delete` + // specified, and should delete with no issue + { + Config: testAccAWSSecurityGroupConfig_revoke_true, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists("aws_security_group.primary", &primary), + testAccCheckAWSSecurityGroupExists("aws_security_group.secondary", &secondary), + testAddCycle, + ), + }, + // Again try to apply the config with the sgs removed; it should work, + // because we've told the SGs to forcefully revoke their rules first + { + Config: testAccAWSSecurityGroupConfig_revoke_base_removed, + }, + }, + }) +} + +func TestAccAWSSecurityGroup_forceRevokeRules_false(t *testing.T) { + var primary ec2.SecurityGroup + var secondary ec2.SecurityGroup + + // Add rules to create a cycle between primary and secondary. This prevents + // Terraform/AWS from being able to destroy the groups + testAddCycle := testAddRuleCycle(&primary, &secondary) + // Remove the rules that created the cycle; Terraform/AWS can now destroy them + testRemoveCycle := testRemoveRuleCycle(&primary, &secondary) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupDestroy, + Steps: []resource.TestStep{ + // create the configuration with 2 security groups, then create a + // dependency cycle such that they cannot be deleted.
These Security + // Groups are configured to explicitly not revoke rules on delete, + // `revoke_rules_on_delete = false` + { + Config: testAccAWSSecurityGroupConfig_revoke_false, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists("aws_security_group.primary", &primary), + testAccCheckAWSSecurityGroupExists("aws_security_group.secondary", &secondary), + testAddCycle, + ), + }, + // Verify the DependencyViolation error by using a configuration with the + // groups removed, and the Groups not configured to revoke their rules. + // Terraform tries to destroy them but cannot. Expect a + // DependencyViolation error + { + Config: testAccAWSSecurityGroupConfig_revoke_base_removed, + ExpectError: regexp.MustCompile("DependencyViolation"), + }, + // Restore the config (a no-op plan) but also remove the dependencies + // between the groups with testRemoveCycle + { + Config: testAccAWSSecurityGroupConfig_revoke_false, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists("aws_security_group.primary", &primary), + testAccCheckAWSSecurityGroupExists("aws_security_group.secondary", &secondary), + testRemoveCycle, + ), + }, + // Again try to apply the config with the sgs removed; it should work + { + Config: testAccAWSSecurityGroupConfig_revoke_base_removed, + }, + }, + }) +} + func TestAccAWSSecurityGroup_basicRuleDescription(t *testing.T) { var group ec2.SecurityGroup @@ -1361,6 +1670,9 @@ resource "aws_security_group" "web" { const testAccAWSSecurityGroupConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-test" + } } resource "aws_security_group" "web" { @@ -1375,17 +1687,113 @@ resource "aws_security_group" "web" { cidr_blocks = ["10.0.0.0/8"] } - egress { - protocol = "tcp" - from_port = 80 - to_port = 8000 - cidr_blocks = ["10.0.0.0/8"] - } + tags { + Name = "tf-acc-revoke-test" + } +} +` +const testAccAWSSecurityGroupConfig_revoke_base_removed = ` +resource "aws_vpc" "sg-race-revoke" { + cidr_block = "10.1.0.0/16" tags { - Name = "tf-acc-test" + Name = "tf-acc-revoke-test" + } +} +` +const testAccAWSSecurityGroupConfig_revoke_base = ` +resource "aws_vpc" "sg-race-revoke" { + cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-test" + } +} + +resource "aws_security_group" "primary" { + name = "tf-acc-sg-race-revoke-primary" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.sg-race-revoke.id}" + + tags { + Name = "tf-acc-revoke-test-primary" } } + +resource "aws_security_group" "secondary" { + name = "tf-acc-sg-race-revoke-secondary" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.sg-race-revoke.id}" + + tags { + Name = "tf-acc-revoke-test-secondary" + } +} +` + +const testAccAWSSecurityGroupConfig_revoke_false = ` +resource "aws_vpc" "sg-race-revoke" { + cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-test" + } +} + +resource "aws_security_group" "primary" { + name = "tf-acc-sg-race-revoke-primary" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.sg-race-revoke.id}" + + tags { + Name = "tf-acc-revoke-test-primary" + } + + revoke_rules_on_delete = false +} + +resource "aws_security_group" "secondary" { + name = "tf-acc-sg-race-revoke-secondary" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.sg-race-revoke.id}" + + tags { + Name = "tf-acc-revoke-test-secondary" + } + + revoke_rules_on_delete = false +} +` + +const testAccAWSSecurityGroupConfig_revoke_true = ` +resource 
"aws_vpc" "sg-race-revoke" { + cidr_block = "10.1.0.0/16" + tags { + Name = "tf-acc-revoke-test" + } +} + +resource "aws_security_group" "primary" { + name = "tf-acc-sg-race-revoke-primary" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.sg-race-revoke.id}" + + tags { + Name = "tf-acc-revoke-test-primary" + } + + revoke_rules_on_delete = true +} + +resource "aws_security_group" "secondary" { + name = "tf-acc-sg-race-revoke-secondary" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.sg-race-revoke.id}" + + tags { + Name = "tf-acc-revoke-test-secondary" + } + + revoke_rules_on_delete = true +} ` const testAccAWSSecurityGroupConfigChange = ` diff --git a/aws/resource_aws_servicecatalog_portfolio.go b/aws/resource_aws_servicecatalog_portfolio.go new file mode 100644 index 00000000000..6d87d4a42ea --- /dev/null +++ b/aws/resource_aws_servicecatalog_portfolio.go @@ -0,0 +1,224 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/servicecatalog" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsServiceCatalogPortfolio() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsServiceCatalogPortfolioCreate, + Read: resourceAwsServiceCatalogPortfolioRead, + Update: resourceAwsServiceCatalogPortfolioUpdate, + Delete: resourceAwsServiceCatalogPortfolioDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "created_time": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateServiceCatalogPortfolioName, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateServiceCatalogPortfolioDescription, + }, + "provider_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateServiceCatalogPortfolioProviderName, + }, + "tags": tagsSchema(), + }, + } +} +func resourceAwsServiceCatalogPortfolioCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).scconn + input := servicecatalog.CreatePortfolioInput{ + AcceptLanguage: aws.String("en"), + } + name := d.Get("name").(string) + input.DisplayName = &name + now := time.Now() + input.IdempotencyToken = aws.String(fmt.Sprintf("%d", now.UnixNano())) + + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("provider_name"); ok { + input.ProviderName = aws.String(v.(string)) + } + + if v, ok := d.GetOk("tags"); ok { + tags := []*servicecatalog.Tag{} + t := v.(map[string]interface{}) + for k, v := range t { + tag := servicecatalog.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + tags = append(tags, &tag) + } + input.Tags = tags + } + + log.Printf("[DEBUG] Creating Service Catalog Portfolio: %#v", input) + resp, err := conn.CreatePortfolio(&input) + if err != nil { + return fmt.Errorf("Creating Service Catalog Portfolio failed: %s", err.Error()) + } + d.SetId(*resp.PortfolioDetail.Id) + + return resourceAwsServiceCatalogPortfolioRead(d, meta) +} + +func 
resourceAwsServiceCatalogPortfolioRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).scconn + input := servicecatalog.DescribePortfolioInput{ + AcceptLanguage: aws.String("en"), + } + input.Id = aws.String(d.Id()) + + log.Printf("[DEBUG] Reading Service Catalog Portfolio: %#v", input) + resp, err := conn.DescribePortfolio(&input) + if err != nil { + if scErr, ok := err.(awserr.Error); ok && scErr.Code() == "ResourceNotFoundException" { + log.Printf("[WARN] Service Catalog Portfolio %q not found, removing from state", d.Id()) + d.SetId("") + return nil + } + return fmt.Errorf("Reading Service Catalog Portfolio '%s' failed: %s", *input.Id, err.Error()) + } + portfolioDetail := resp.PortfolioDetail + if err := d.Set("created_time", portfolioDetail.CreatedTime.Format(time.RFC3339)); err != nil { + log.Printf("[DEBUG] Error setting created_time: %s", err) + } + d.Set("arn", portfolioDetail.ARN) + d.Set("description", portfolioDetail.Description) + d.Set("name", portfolioDetail.DisplayName) + d.Set("provider_name", portfolioDetail.ProviderName) + tags := map[string]string{} + for _, tag := range resp.Tags { + tags[*tag.Key] = *tag.Value + } + d.Set("tags", tags) + return nil +} + +func resourceAwsServiceCatalogPortfolioUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).scconn + input := servicecatalog.UpdatePortfolioInput{ + AcceptLanguage: aws.String("en"), + Id: aws.String(d.Id()), + } + + if d.HasChange("name") { + v, _ := d.GetOk("name") + input.DisplayName = aws.String(v.(string)) + } + + if d.HasChange("description") { + v, _ := d.GetOk("description") + input.Description = aws.String(v.(string)) + } + + if d.HasChange("provider_name") { + v, _ := d.GetOk("provider_name") + input.ProviderName = aws.String(v.(string)) + } + + if d.HasChange("tags") { + currentTags, requiredTags := d.GetChange("tags") + log.Printf("[DEBUG] Current Tags: %#v", currentTags) + log.Printf("[DEBUG] Required Tags: %#v", requiredTags) + + tagsToAdd, tagsToRemove := tagUpdates(requiredTags.(map[string]interface{}), currentTags.(map[string]interface{})) + log.Printf("[DEBUG] Tags To Add: %#v", tagsToAdd) + log.Printf("[DEBUG] Tags To Remove: %#v", tagsToRemove) + input.AddTags = tagsToAdd + input.RemoveTags = tagsToRemove + } + + log.Printf("[DEBUG] Update Service Catalog Portfolio: %#v", input) + _, err := conn.UpdatePortfolio(&input) + if err != nil { + return fmt.Errorf("Updating Service Catalog Portfolio '%s' failed: %s", *input.Id, err.Error()) + } + return resourceAwsServiceCatalogPortfolioRead(d, meta) +} + +func tagUpdates(requiredTags, currentTags map[string]interface{}) ([]*servicecatalog.Tag, []*string) { + var tagsToAdd []*servicecatalog.Tag + var tagsToRemove []*string + + for rk, rv := range requiredTags { + addTag := true + for ck, cv := range currentTags { + if (rk == ck) && (rv.(string) == cv.(string)) { + addTag = false + } + } + if addTag { + tag := &servicecatalog.Tag{Key: aws.String(rk), Value: aws.String(rv.(string))} + tagsToAdd = append(tagsToAdd, tag) + } + } + + for ck, cv := range currentTags { + removeTag := true + for rk, rv := range requiredTags { + if (rk == ck) && (rv.(string) == cv.(string)) { + removeTag = false + } + } + if removeTag { + tagsToRemove = append(tagsToRemove, aws.String(ck)) + } + } + + return tagsToAdd, tagsToRemove +} + +func resourceAwsServiceCatalogPortfolioDelete(d 
*schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).scconn + input := servicecatalog.DeletePortfolioInput{} + input.Id = aws.String(d.Id()) + + log.Printf("[DEBUG] Delete Service Catalog Portfolio: %#v", input) + _, err := conn.DeletePortfolio(&input) + if err != nil { + return fmt.Errorf("Deleting Service Catalog Portfolio '%s' failed: %s", *input.Id, err.Error()) + } + return nil +} diff --git a/aws/resource_aws_servicecatalog_portfolio_test.go b/aws/resource_aws_servicecatalog_portfolio_test.go new file mode 100644 index 00000000000..99da34c2054 --- /dev/null +++ b/aws/resource_aws_servicecatalog_portfolio_test.go @@ -0,0 +1,209 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/servicecatalog" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "testing" +) + +func TestAccAWSServiceCatalogPortfolioBasic(t *testing.T) { + name := acctest.RandString(5) + var dpo servicecatalog.DescribePortfolioOutput + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckServiceCatalogPortfolioDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic1(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckPortfolio("aws_servicecatalog_portfolio.test", &dpo), + resource.TestCheckResourceAttrSet("aws_servicecatalog_portfolio.test", "arn"), + resource.TestCheckResourceAttrSet("aws_servicecatalog_portfolio.test", "created_time"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "name", name), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "description", "test-2"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "provider_name", "test-3"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.Key1", "Value One"), + ), + }, + resource.TestStep{ + Config: testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic2(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckPortfolio("aws_servicecatalog_portfolio.test", &dpo), + resource.TestCheckResourceAttrSet("aws_servicecatalog_portfolio.test", "arn"), + resource.TestCheckResourceAttrSet("aws_servicecatalog_portfolio.test", "created_time"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "name", name), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "description", "test-b"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "provider_name", "test-c"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.%", "2"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.Key1", "Value 1"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.Key2", "Value Two"), + ), + }, + resource.TestStep{ + Config: testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic3(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckPortfolio("aws_servicecatalog_portfolio.test", &dpo), + resource.TestCheckResourceAttrSet("aws_servicecatalog_portfolio.test", "arn"), + resource.TestCheckResourceAttrSet("aws_servicecatalog_portfolio.test", "created_time"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "name", name), + 
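// The third step changes only the description and replaces the tag set with a single Key3 entry; name and provider_name carry over from the previous step. + 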
resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "description", "test-only-change-me"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "provider_name", "test-c"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_servicecatalog_portfolio.test", "tags.Key3", "Value Three"), + ), + }, + }, + }) +} + +func TestAccAWSServiceCatalogPortfolioDisappears(t *testing.T) { + name := acctest.RandString(5) + var dpo servicecatalog.DescribePortfolioOutput + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckServiceCatlaogPortfolioDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic1(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckPortfolio("aws_servicecatalog_portfolio.test", &dpo), + testAccCheckServiceCatlaogPortfolioDisappears(&dpo), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSServiceCatalogPortfolioImport(t *testing.T) { + resourceName := "aws_servicecatalog_portfolio.test" + + name := acctest.RandString(5) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckServiceCatlaogPortfolioDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic1(name), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckPortfolio(pr string, dpo *servicecatalog.DescribePortfolioOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).scconn + rs, ok := s.RootModule().Resources[pr] + if !ok { + return fmt.Errorf("Not found: %s", pr) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + input := servicecatalog.DescribePortfolioInput{} + input.Id = aws.String(rs.Primary.ID) + + resp, err := conn.DescribePortfolio(&input) + if err != nil { + return err + } + + *dpo = *resp + return nil + } +} + +func testAccCheckServiceCatlaogPortfolioDisappears(dpo *servicecatalog.DescribePortfolioOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).scconn + + input := servicecatalog.DeletePortfolioInput{} + input.Id = dpo.PortfolioDetail.Id + + _, err := conn.DeletePortfolio(&input) + if err != nil { + return err + } + + return nil + } +} + +func testAccCheckServiceCatlaogPortfolioDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).scconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_servicecatalog_portfolio" { + continue + } + input := servicecatalog.DescribePortfolioInput{} + input.Id = aws.String(rs.Primary.ID) + + _, err := conn.DescribePortfolio(&input) + if err == nil { + return fmt.Errorf("Portfolio still exists") + } + } + + return nil +} + +func testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic1(name string) string { + return fmt.Sprintf(` +resource "aws_servicecatalog_portfolio" "test" { + name = "%s" + description = "test-2" + provider_name = "test-3" + tags { + Key1 = "Value One" + } +} +`, name) +} + +func testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic2(name string) string { + return fmt.Sprintf(` +resource "aws_servicecatalog_portfolio" "test" { + 
name = "%s" + description = "test-b" + provider_name = "test-c" + tags { + Key1 = "Value 1" + Key2 = "Value Two" + } +} +`, name) +} + +func testAccCheckAwsServiceCatalogPortfolioResourceConfigBasic3(name string) string { + return fmt.Sprintf(` +resource "aws_servicecatalog_portfolio" "test" { + name = "%s" + description = "test-only-change-me" + provider_name = "test-c" + tags { + Key3 = "Value Three" + } +} +`, name) +} diff --git a/aws/resource_aws_ses_domain_dkim.go b/aws/resource_aws_ses_domain_dkim.go new file mode 100644 index 00000000000..abde46bf9bb --- /dev/null +++ b/aws/resource_aws_ses_domain_dkim.go @@ -0,0 +1,87 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesDomainDkim() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesDomainDkimCreate, + Read: resourceAwsSesDomainDkimRead, + Delete: resourceAwsSesDomainDkimDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "dkim_tokens": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceAwsSesDomainDkimCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + domainName := d.Get("domain").(string) + + createOpts := &ses.VerifyDomainDkimInput{ + Domain: aws.String(domainName), + } + + _, err := conn.VerifyDomainDkim(createOpts) + if err != nil { + return fmt.Errorf("Error requesting SES domain identity verification: %s", err) + } + + d.SetId(domainName) + + return resourceAwsSesDomainDkimRead(d, meta) +} + +func resourceAwsSesDomainDkimRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + domainName := d.Id() + d.Set("domain", domainName) + + readOpts := &ses.GetIdentityDkimAttributesInput{ + Identities: []*string{ + aws.String(domainName), + }, + } + + response, err := conn.GetIdentityDkimAttributes(readOpts) + if err != nil { + log.Printf("[WARN] Error fetching identity verification attributes for %s: %s", d.Id(), err) + return err + } + + verificationAttrs, ok := response.DkimAttributes[domainName] + if !ok { + log.Printf("[WARN] Domain not listed in response when fetching verification attributes for %s", d.Id()) + d.SetId("") + return nil + } + + d.Set("dkim_tokens", aws.StringValueSlice(verificationAttrs.DkimTokens)) + return nil +} + +func resourceAwsSesDomainDkimDelete(d *schema.ResourceData, meta interface{}) error { + + return nil +} diff --git a/aws/resource_aws_ses_domain_dkim_test.go b/aws/resource_aws_ses_domain_dkim_test.go new file mode 100644 index 00000000000..184bb7a8244 --- /dev/null +++ b/aws/resource_aws_ses_domain_dkim_test.go @@ -0,0 +1,101 @@ +package aws + +import ( + "fmt" + "regexp" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAwsSESDomainDkim_basic(t *testing.T) { + domain := fmt.Sprintf( + "%s.terraformtesting.com", + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + Steps: 
[]resource.TestStep{ + { + Config: fmt.Sprintf(testAccAwsSESDomainDkimConfig, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESDomainDkimExists("aws_ses_domain_dkim.test"), + testAccCheckAwsSESDomainDkimTokens("aws_ses_domain_dkim.test", domain), + ), + }, + }, + }) +} + +func testAccCheckAwsSESDomainDkimExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES Domain Identity not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES Domain Identity name not set") + } + + domain := rs.Primary.ID + conn := testAccProvider.Meta().(*AWSClient).sesConn + + params := &ses.GetIdentityDkimAttributesInput{ + Identities: []*string{ + aws.String(domain), + }, + } + + response, err := conn.GetIdentityDkimAttributes(params) + if err != nil { + return err + } + + if response.DkimAttributes[domain] == nil { + return fmt.Errorf("SES Domain DKIM %s not found in AWS", domain) + } + + return nil + } +} + +func testAccCheckAwsSESDomainDkimTokens(n string, domain string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, _ := s.RootModule().Resources[n] + + expectedNum := 3 + expectedFormat := regexp.MustCompile("[a-z0-9]{32}") + + tokenNum, _ := strconv.Atoi(rs.Primary.Attributes["dkim_tokens.#"]) + if expectedNum != tokenNum { + return fmt.Errorf("Incorrect number of DKIM tokens, expected: %d, got: %d", expectedNum, tokenNum) + } + for i := 0; i < expectedNum; i++ { + key := fmt.Sprintf("dkim_tokens.%d", i) + token, _ := rs.Primary.Attributes[key] + if !expectedFormat.MatchString(token) { + return fmt.Errorf("Incorrect format of DKIM token: %v", token) + } + } + + return nil + } +} + +const testAccAwsSESDomainDkimConfig = ` +resource "aws_ses_domain_identity" "test" { + domain = "%s" +} +resource "aws_ses_domain_dkim" "test" { + domain = "${aws_ses_domain_identity.test.domain}" +} +` diff --git a/aws/resource_aws_snapshot_create_volume_permission.go b/aws/resource_aws_snapshot_create_volume_permission.go index 6a7fd40a183..58b7090cbdb 100644 --- a/aws/resource_aws_snapshot_create_volume_permission.go +++ b/aws/resource_aws_snapshot_create_volume_permission.go @@ -66,7 +66,7 @@ func resourceAwsSnapshotCreateVolumePermissionCreate(d *schema.ResourceData, met Pending: []string{"denied"}, Target: []string{"granted"}, Refresh: resourceAwsSnapshotCreateVolumePermissionStateRefreshFunc(conn, snapshot_id, account_id), - Timeout: 5 * time.Minute, + Timeout: 20 * time.Minute, Delay: 10 * time.Second, MinTimeout: 10 * time.Second, } diff --git a/aws/resource_aws_spot_fleet_request.go b/aws/resource_aws_spot_fleet_request.go index cfb62b19367..0900eb47974 100644 --- a/aws/resource_aws_spot_fleet_request.go +++ b/aws/resource_aws_spot_fleet_request.go @@ -256,6 +256,11 @@ func resourceAwsSpotFleetRequest() *schema.Resource { Computed: true, ForceNew: true, }, + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, }, }, Set: hashLaunchSpecification, @@ -377,6 +382,21 @@ func buildSpotFleetLaunchSpecification(d map[string]interface{}, meta interface{ } } + if m, ok := d["tags"].(map[string]interface{}); ok && len(m) > 0 { + tagsSpec := make([]*ec2.SpotFleetTagSpecification, 0) + + tags := tagsFromMap(m) + + spec := &ec2.SpotFleetTagSpecification{ + ResourceType: aws.String("instance"), + Tags: tags, + } + + tagsSpec = append(tagsSpec, spec) + + opts.TagSpecifications = tagsSpec + } + subnetId, hasSubnetId := d["subnet_id"] if hasSubnetId { 
opts.SubnetId = aws.String(subnetId.(string)) @@ -880,6 +900,15 @@ func launchSpecToMap(l *ec2.SpotFleetLaunchSpecification, rootDevName *string) m m["weighted_capacity"] = strconv.FormatFloat(*l.WeightedCapacity, 'f', 0, 64) } + if l.TagSpecifications != nil { + for _, tagSpecs := range l.TagSpecifications { + // only "instance" tags are currently supported: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetTagSpecification.html + if *(tagSpecs.ResourceType) == "instance" { + m["tags"] = tagsToMap(tagSpecs.Tags) + } + } + } + return m } diff --git a/aws/resource_aws_spot_fleet_request_test.go b/aws/resource_aws_spot_fleet_request_test.go index 9a566a416f8..5ed4e9b42cc 100644 --- a/aws/resource_aws_spot_fleet_request_test.go +++ b/aws/resource_aws_spot_fleet_request_test.go @@ -377,6 +377,29 @@ func TestAccAWSSpotFleetRequest_withEBSDisk(t *testing.T) { }) } +func TestAccAWSSpotFleetRequest_withTags(t *testing.T) { + var config ec2.SpotFleetRequestConfig + rName := acctest.RandString(10) + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSpotFleetRequestDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSpotFleetRequestTagsConfig(rName, rInt), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAWSSpotFleetRequestExists("aws_spot_fleet_request.foo", &config), + resource.TestCheckResourceAttr("aws_spot_fleet_request.foo", "launch_specification.24370212.tags.%", "2"), + resource.TestCheckResourceAttr("aws_spot_fleet_request.foo", "launch_specification.24370212.tags.First", "TfAccTest"), + resource.TestCheckResourceAttr("aws_spot_fleet_request.foo", "launch_specification.24370212.tags.Second", "Terraform"), + ), + }, + }, + }) +} + func TestAccAWSSpotFleetRequest_placementTenancy(t *testing.T) { var sfr ec2.SpotFleetRequestConfig rName := acctest.RandString(10) @@ -879,7 +902,7 @@ resource "aws_subnet" "bar" { resource "aws_spot_fleet_request" "foo" { iam_fleet_role = "${aws_iam_role.test-role.arn}" - spot_price = "0.025" + spot_price = "0.0265" target_capacity = 4 valid_until = "2019-11-04T20:44:20Z" terminate_instances_with_expiration = true @@ -1412,6 +1435,79 @@ resource "aws_spot_fleet_request" "foo" { `, rInt, rInt, rName) } +func testAccAWSSpotFleetRequestTagsConfig(rName string, rInt int) string { + return fmt.Sprintf(` +resource "aws_iam_policy" "test-policy" { + name = "test-policy-%d" + path = "/" + description = "Spot Fleet Request ACCTest Policy" + policy = < 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + keys := make([]*string, 0, len(remove)) + for k, _ := range remove { + keys = append(keys, aws.String(k)) + } + + _, err := conn.UntagQueue(&sqs.UntagQueueInput{ + QueueUrl: aws.String(d.Id()), + TagKeys: keys, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + + _, err := conn.TagQueue(&sqs.TagQueueInput{ + QueueUrl: aws.String(d.Id()), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/aws/resource_aws_sqs_queue_test.go b/aws/resource_aws_sqs_queue_test.go index 8e6d8cddca2..281d3ec9e95 100644 --- a/aws/resource_aws_sqs_queue_test.go +++ b/aws/resource_aws_sqs_queue_test.go @@ -45,6 +45,40 @@ func TestAccAWSSQSQueue_basic(t *testing.T) { }) } +func TestAccAWSSQSQueue_tags(t *testing.T) { + queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + 
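// The steps below exercise the full tag lifecycle: create the queue with two tags, change them, then remove them entirely. + 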
PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSQSQueueDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSSQSConfigWithTags(queueName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue"), + resource.TestCheckResourceAttr("aws_sqs_queue.queue", "tags.%", "2"), + resource.TestCheckResourceAttr("aws_sqs_queue.queue", "tags.Usage", "original"), + ), + }, + { + Config: testAccAWSSQSConfigWithTagsChanged(queueName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue"), + resource.TestCheckResourceAttr("aws_sqs_queue.queue", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_sqs_queue.queue", "tags.Usage", "changed"), + ), + }, + { + Config: testAccAWSSQSConfigWithDefaults(queueName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue"), + resource.TestCheckNoResourceAttr("aws_sqs_queue.queue", "tags"), + ), + }, + }, + }) +} + func TestAccAWSSQSQueue_namePrefix(t *testing.T) { prefix := "acctest-sqs-queue" resource.Test(t, resource.TestCase{ @@ -407,7 +441,7 @@ func TestAccAWSSQSQueue_Encryption(t *testing.T) { func testAccAWSSQSConfigWithDefaults(r string) string { return fmt.Sprintf(` resource "aws_sqs_queue" "queue" { - name = "%s" + name = "%s" } `, r) } @@ -415,7 +449,7 @@ resource "aws_sqs_queue" "queue" { func testAccAWSSQSConfigWithNamePrefix(r string) string { return fmt.Sprintf(` resource "aws_sqs_queue" "queue" { - name_prefix = "%s" + name_prefix = "%s" } `, r) } @@ -423,8 +457,8 @@ resource "aws_sqs_queue" "queue" { func testAccAWSSQSFifoConfigWithDefaults(r string) string { return fmt.Sprintf(` resource "aws_sqs_queue" "queue" { - name = "%s.fifo" - fifo_queue = true + name = "%s.fifo" + fifo_queue = true } `, r) } @@ -450,8 +484,8 @@ resource "aws_sqs_queue" "my_queue" { redrive_policy = < 0 { log.Printf("[DEBUG] Removing tags: %#v", remove) - _, err := retryOnAwsCode("NoSuchBucket", func() (interface{}, error) { + _, err := retryOnAwsCodes([]string{"NoSuchBucket", "OperationAborted"}, func() (interface{}, error) { return conn.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{ Bucket: aws.String(d.Get("bucket").(string)), }) @@ -40,7 +40,7 @@ func setTagsS3(conn *s3.S3, d *schema.ResourceData) error { }, } - _, err := retryOnAwsCode("NoSuchBucket", func() (interface{}, error) { + _, err := retryOnAwsCodes([]string{"NoSuchBucket", "OperationAborted"}, func() (interface{}, error) { return conn.PutBucketTagging(req) }) if err != nil { diff --git a/aws/structure.go b/aws/structure.go index 1b4745a2d42..393b26ef8b6 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -1014,6 +1014,38 @@ func expandESEBSOptions(m map[string]interface{}) *elasticsearch.EBSOptions { return &options } +func flattenESVPCDerivedInfo(o *elasticsearch.VPCDerivedInfo) []map[string]interface{} { + m := map[string]interface{}{} + + if o.AvailabilityZones != nil { + m["availability_zones"] = schema.NewSet(schema.HashString, flattenStringList(o.AvailabilityZones)) + } + if o.SecurityGroupIds != nil { + m["security_group_ids"] = schema.NewSet(schema.HashString, flattenStringList(o.SecurityGroupIds)) + } + if o.SubnetIds != nil { + m["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(o.SubnetIds)) + } + if o.VPCId != nil { + m["vpc_id"] = *o.VPCId + } + + return []map[string]interface{}{m} +} + +func expandESVPCOptions(m map[string]interface{}) *elasticsearch.VPCOptions { + options 
:= elasticsearch.VPCOptions{} + + if v, ok := m["security_group_ids"]; ok { + options.SecurityGroupIds = expandStringList(v.(*schema.Set).List()) + } + if v, ok := m["subnet_ids"]; ok { + options.SubnetIds = expandStringList(v.(*schema.Set).List()) + } + + return &options +} + func expandConfigRecordingGroup(configured []interface{}) *configservice.RecordingGroup { recordingGroup := configservice.RecordingGroup{} group := configured[0].(map[string]interface{}) @@ -1471,24 +1503,35 @@ func expandApiGatewayStageKeyOperations(d *schema.ResourceData) []*apigateway.Pa return operations } -func expandCloudWachLogMetricTransformations(m map[string]interface{}) []*cloudwatchlogs.MetricTransformation { +func expandCloudWachLogMetricTransformations(m map[string]interface{}) ([]*cloudwatchlogs.MetricTransformation, error) { transformation := cloudwatchlogs.MetricTransformation{ MetricName: aws.String(m["name"].(string)), MetricNamespace: aws.String(m["namespace"].(string)), MetricValue: aws.String(m["value"].(string)), } - return []*cloudwatchlogs.MetricTransformation{&transformation} + if m["default_value"] != "" { + transformation.DefaultValue = aws.Float64(m["default_value"].(float64)) + } + + return []*cloudwatchlogs.MetricTransformation{&transformation}, nil } -func flattenCloudWachLogMetricTransformations(ts []*cloudwatchlogs.MetricTransformation) map[string]string { - m := make(map[string]string, 0) +func flattenCloudWachLogMetricTransformations(ts []*cloudwatchlogs.MetricTransformation) []interface{} { + mts := make([]interface{}, 0) + m := make(map[string]interface{}, 0) m["name"] = *ts[0].MetricName m["namespace"] = *ts[0].MetricNamespace m["value"] = *ts[0].MetricValue - return m + if ts[0].DefaultValue != nil { + m["default_value"] = *ts[0].DefaultValue + } + + mts = append(mts, m) + + return mts } func flattenBeanstalkAsg(list []*elasticbeanstalk.AutoScalingGroup) []string { @@ -2186,3 +2229,114 @@ type GroupIdentifier struct { Description *string } + +func expandCognitoIdentityPoolRoles(config map[string]interface{}) map[string]*string { + m := map[string]*string{} + for k, v := range config { + s := v.(string) + m[k] = &s + } + return m +} + +func flattenCognitoIdentityPoolRoles(config map[string]*string) map[string]string { + m := map[string]string{} + for k, v := range config { + m[k] = *v + } + return m +} + +func expandCognitoIdentityPoolRoleMappingsAttachment(rms []interface{}) map[string]*cognitoidentity.RoleMapping { + values := make(map[string]*cognitoidentity.RoleMapping, 0) + + if len(rms) == 0 { + return values + } + + for _, v := range rms { + rm := v.(map[string]interface{}) + key := rm["identity_provider"].(string) + + roleMapping := &cognitoidentity.RoleMapping{ + Type: aws.String(rm["type"].(string)), + } + + if sv, ok := rm["ambiguous_role_resolution"].(string); ok { + roleMapping.AmbiguousRoleResolution = aws.String(sv) + } + + if mr, ok := rm["mapping_rule"].([]interface{}); ok && len(mr) > 0 { + rct := &cognitoidentity.RulesConfigurationType{} + mappingRules := make([]*cognitoidentity.MappingRule, 0) + + for _, r := range mr { + rule := r.(map[string]interface{}) + mr := &cognitoidentity.MappingRule{ + Claim: aws.String(rule["claim"].(string)), + MatchType: aws.String(rule["match_type"].(string)), + RoleARN: aws.String(rule["role_arn"].(string)), + Value: aws.String(rule["value"].(string)), + } + + mappingRules = append(mappingRules, mr) + } + + rct.Rules = mappingRules + roleMapping.RulesConfiguration = rct + } + + values[key] = roleMapping + } + + return values +} 
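For orientation, a minimal sketch of the round trip these helpers perform, assuming it sits inside the provider's aws package alongside the functions above; the identity provider, claim, and role ARN values are hypothetical, made up for illustration:

// exampleRoleMappingExpansion shows the list-of-maps shape that
// expandCognitoIdentityPoolRoleMappingsAttachment consumes (one entry per
// identity provider, mirroring the role_mappings schema) and the
// provider-keyed SDK map it returns.
func exampleRoleMappingExpansion() map[string]*cognitoidentity.RoleMapping {
	rms := []interface{}{
		map[string]interface{}{
			"identity_provider":         "graph.facebook.com", // hypothetical provider
			"type":                      cognitoidentity.RoleMappingTypeRules,
			"ambiguous_role_resolution": cognitoidentity.AmbiguousRoleResolutionTypeDeny,
			"mapping_rule": []interface{}{
				map[string]interface{}{
					"claim":      "isAdmin", // hypothetical claim name
					"match_type": cognitoidentity.MappingRuleMatchTypeEquals,
					"role_arn":   "arn:aws:iam::123456789012:role/admin", // hypothetical ARN
					"value":      "true",
				},
			},
		},
	}
	// The returned map is keyed by identity provider; each value carries the
	// mapping type, the ambiguity resolution strategy, and the rules built above.
	return expandCognitoIdentityPoolRoleMappingsAttachment(rms)
}

flattenCognitoIdentityPoolRoleMappingsAttachment, below, performs the inverse, turning the SDK map back into the list of maps Terraform stores in state.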
+ +func flattenCognitoIdentityPoolRoleMappingsAttachment(rms map[string]*cognitoidentity.RoleMapping) []map[string]interface{} { + roleMappings := make([]map[string]interface{}, 0) + + if rms == nil { + return roleMappings + } + + for k, v := range rms { + m := make(map[string]interface{}) + + if v == nil { + return nil + } + + if v.Type != nil { + m["type"] = *v.Type + } + + if v.AmbiguousRoleResolution != nil { + m["ambiguous_role_resolution"] = *v.AmbiguousRoleResolution + } + + if v.RulesConfiguration != nil && v.RulesConfiguration.Rules != nil { + m["mapping_rule"] = flattenCognitoIdentityPoolRolesAttachmentMappingRules(v.RulesConfiguration.Rules) + } + + m["identity_provider"] = k + roleMappings = append(roleMappings, m) + } + + return roleMappings +} + +func flattenCognitoIdentityPoolRolesAttachmentMappingRules(d []*cognitoidentity.MappingRule) []interface{} { + rules := make([]interface{}, 0) + + for _, rule := range d { + r := make(map[string]interface{}) + r["claim"] = *rule.Claim + r["match_type"] = *rule.MatchType + r["role_arn"] = *rule.RoleARN + r["value"] = *rule.Value + + rules = append(rules, r) + } + + return rules +} diff --git a/aws/validators.go b/aws/validators.go index 426f560430c..44acd2cf1a6 100644 --- a/aws/validators.go +++ b/aws/validators.go @@ -9,6 +9,7 @@ import ( "time" "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/aws/aws-sdk-go/service/cognitoidentity" "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform/helper/schema" ) @@ -761,7 +762,7 @@ func validateSQSFifoQueueName(v interface{}, k string) (errors []error) { func validateSNSSubscriptionProtocol(v interface{}, k string) (ws []string, errors []error) { value := strings.ToLower(v.(string)) - forbidden := []string{"email", "sms"} + forbidden := []string{"email"} for _, f := range forbidden { if strings.Contains(value, f) { errors = append( @@ -1511,3 +1512,142 @@ func validateSecurityGroupRuleDescription(v interface{}, k string) (ws []string, } return } + +func validateServiceCatalogPortfolioName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if (len(value) > 20) || (len(value) == 0) { + errors = append(errors, fmt.Errorf("Service catalog name must be between 1 and 20 characters.")) + } + return +} + +func validateServiceCatalogPortfolioDescription(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 2000 { + errors = append(errors, fmt.Errorf("Service catalog description must be less than 2000 characters.")) + } + return +} + +func validateServiceCatalogPortfolioProviderName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if (len(value) > 20) || (len(value) == 0) { + errors = append(errors, fmt.Errorf("Service catalog provider name must be between 1 and 20 characters.")) + } + return +} + +func validateCognitoRoleMappingsAmbiguousRoleResolutionAgainstType(v map[string]interface{}) (errors []error) { + t := v["type"].(string) + isRequired := t == cognitoidentity.RoleMappingTypeToken || t == cognitoidentity.RoleMappingTypeRules + + if value, ok := v["ambiguous_role_resolution"]; (!ok || value == "") && isRequired { + errors = append(errors, fmt.Errorf("Ambiguous Role Resolution must be defined when \"type\" equals \"Token\" or \"Rules\"")) + } + + return +} + +func validateCognitoRoleMappingsRulesConfiguration(v map[string]interface{}) (errors []error) { + t := v["type"].(string) + value, ok := v["mapping_rule"] + valLength := len(value.([]interface{})) + + if (!ok 
|| valLength == 0) && t == cognitoidentity.RoleMappingTypeRules { + errors = append(errors, fmt.Errorf("mapping_rule is required for Rules")) + } + + if (ok || valLength > 0) && t == cognitoidentity.RoleMappingTypeToken { + errors = append(errors, fmt.Errorf("mapping_rule must not be set for Token based role mapping")) + } + + return +} + +func validateCognitoRoleMappingsAmbiguousRoleResolution(v interface{}, k string) (ws []string, errors []error) { + validValues := []string{ + cognitoidentity.AmbiguousRoleResolutionTypeAuthenticatedRole, + cognitoidentity.AmbiguousRoleResolutionTypeDeny, + } + value := v.(string) + for _, s := range validValues { + if value == s { + return + } + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid value %q. Valid values are %q.", + k, value, validValues)) + return +} + +func validateCognitoRoleMappingsRulesClaim(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if !regexp.MustCompile("^[\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}]+$").MatchString(value) { + errors = append(errors, fmt.Errorf("%q must contain only alphanumeric characters, dots, underscores, colons, slashes and hyphens", k)) + } + + return +} + +func validateCognitoRoleMappingsRulesMatchType(v interface{}, k string) (ws []string, errors []error) { + validValues := []string{ + cognitoidentity.MappingRuleMatchTypeEquals, + cognitoidentity.MappingRuleMatchTypeContains, + cognitoidentity.MappingRuleMatchTypeStartsWith, + cognitoidentity.MappingRuleMatchTypeNotEqual, + } + value := v.(string) + for _, s := range validValues { + if value == s { + return + } + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid value %q. Valid values are %q.", + k, value, validValues)) + return +} + +func validateCognitoRoleMappingsRulesValue(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) < 1 { + errors = append(errors, fmt.Errorf("%q cannot be less than 1 character", k)) + } + + if len(value) > 128 { + errors = append(errors, fmt.Errorf("%q cannot be longer than 128 characters", k)) + } + + return +} + +func validateCognitoRoleMappingsType(v interface{}, k string) (ws []string, errors []error) { + validValues := []string{ + cognitoidentity.RoleMappingTypeToken, + cognitoidentity.RoleMappingTypeRules, + } + value := v.(string) + for _, s := range validValues { + if value == s { + return + } + } + errors = append(errors, fmt.Errorf( + "%q contains an invalid value %q. Valid values are %q.", + k, value, validValues)) + return +} + +// Validates that either authenticated or unauthenticated is defined +func validateCognitoRoles(v map[string]interface{}, k string) (errors []error) { + _, hasAuthenticated := v["authenticated"].(string) + _, hasUnauthenticated := v["unauthenticated"].(string) + + if !hasAuthenticated && !hasUnauthenticated { + errors = append(errors, fmt.Errorf("%q: Either \"authenticated\" or \"unauthenticated\" must be defined", k)) + } + + return +} diff --git a/aws/validators_test.go b/aws/validators_test.go index 3b3b6e93314..2e9dae246b9 100644 --- a/aws/validators_test.go +++ b/aws/validators_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/aws/aws-sdk-go/service/cognitoidentity" "github.com/aws/aws-sdk-go/service/s3" ) @@ -1104,6 +1105,7 @@ func TestValidateSNSSubscriptionProtocol(t *testing.T) { "application", "http", "https", + "sms", } for _, v := range validProtocols { if _, errors := validateSNSSubscriptionProtocol(v, "protocol"); len(errors) > 0 { @@ -1116,8 +1118,6 @@ func TestValidateSNSSubscriptionProtocol(t *testing.T) { "email", "Email-JSON", "email-json", - "SMS", - "sms", } for _, v := range invalidProtocols { if _, errors := validateSNSSubscriptionProtocol(v, "protocol"); len(errors) == 0 { @@ -2449,6 +2449,107 @@ func TestValidateBatchName(t *testing.T) { } } + +func TestValidateCognitoRoleMappingsAmbiguousRoleResolutionAgainstType(t *testing.T) { + cases := []struct { + AmbiguousRoleResolution interface{} + Type string + ErrCount int + }{ + { + AmbiguousRoleResolution: nil, + Type: cognitoidentity.RoleMappingTypeToken, + ErrCount: 1, + }, + { + AmbiguousRoleResolution: "foo", + Type: cognitoidentity.RoleMappingTypeToken, + ErrCount: 0, // 0 because it is defined; the value itself isn't validated here + }, + { + AmbiguousRoleResolution: cognitoidentity.AmbiguousRoleResolutionTypeAuthenticatedRole, + Type: cognitoidentity.RoleMappingTypeToken, + ErrCount: 0, + }, + { + AmbiguousRoleResolution: cognitoidentity.AmbiguousRoleResolutionTypeDeny, + Type: cognitoidentity.RoleMappingTypeToken, + ErrCount: 0, + }, + } + + for _, tc := range cases { + m := make(map[string]interface{}) + // Reproduce an undefined ambiguous_role_resolution + if tc.AmbiguousRoleResolution != nil { + m["ambiguous_role_resolution"] = tc.AmbiguousRoleResolution + } + m["type"] = tc.Type + + errors := validateCognitoRoleMappingsAmbiguousRoleResolutionAgainstType(m) + if len(errors) != tc.ErrCount { + t.Fatalf("Cognito Role Mappings validation failed: %v, expected err count %d, got %d, for config %#v", errors, tc.ErrCount, len(errors), m) + } + } +} + +func TestValidateCognitoRoleMappingsAmbiguousRoleResolution(t *testing.T) { + validValues := []string{ + cognitoidentity.AmbiguousRoleResolutionTypeAuthenticatedRole, + cognitoidentity.AmbiguousRoleResolutionTypeDeny, + } + + for _, s := range validValues { + _, errors := validateCognitoRoleMappingsAmbiguousRoleResolution(s, "ambiguous_role_resolution") + if len(errors) > 0 { + t.Fatalf("%q should be a valid Cognito Ambiguous Role Resolution type: %v", s, errors) + } + } + + invalidValues := []string{ + "foo", + "123", + "foo-bar", + "foo_bar123", + } + + for _, s := range invalidValues { + _, errors := validateCognitoRoleMappingsAmbiguousRoleResolution(s, "ambiguous_role_resolution") + if len(errors) == 0 { + t.Fatalf("%q should not be a valid Cognito Ambiguous Role Resolution type: %v", s, errors) + } + } +} + +func TestValidateCognitoRoleMappingsRulesMatchType(t *testing.T) { + validValues := 
[]string{ + cognitoidentity.MappingRuleMatchTypeEquals, + cognitoidentity.MappingRuleMatchTypeContains, + cognitoidentity.MappingRuleMatchTypeStartsWith, + cognitoidentity.MappingRuleMatchTypeNotEqual, + } + + for _, s := range validValues { + _, errors := validateCognitoRoleMappingsRulesMatchType(s, "match_type") + if len(errors) > 0 { + t.Fatalf("%q should be a valid Cognito Role Mappings Rules Match Type: %v", s, errors) + } + } + + invalidValues := []string{ + "foo", + "123", + "foo-bar", + "foo_bar123", + } + + for _, s := range invalidValues { + _, errors := validateCognitoRoleMappingsRulesMatchType(s, "match_type") + if len(errors) == 0 { + t.Fatalf("%q should not be a valid Cognito Role Mappings Rules Match Type: %v", s, errors) + } + } +} + func TestValidateSecurityGroupRuleDescription(t *testing.T) { validDescriptions := []string{ "testrule", @@ -2474,3 +2575,31 @@ func TestValidateSecurityGroupRuleDescription(t *testing.T) { } } } + +func TestValidateCognitoRoleMappingsType(t *testing.T) { + validValues := []string{ + cognitoidentity.RoleMappingTypeToken, + cognitoidentity.RoleMappingTypeRules, + } + + for _, s := range validValues { + _, errors := validateCognitoRoleMappingsType(s, "match_type") + if len(errors) > 0 { + t.Fatalf("%q should be a valid Cognito Role Mappings Type: %v", s, errors) + } + } + + invalidValues := []string{ + "foo", + "123", + "foo-bar", + "foo_bar123", + } + + for _, s := range invalidValues { + _, errors := validateCognitoRoleMappingsType(s, "match_type") + if len(errors) == 0 { + t.Fatalf("%q should not be a valid Cognito Role Mappings Type: %v", s, errors) + } + } +} diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 00000000000..a4c5efd822f --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 00000000000..e708c031b95 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,437 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. 
+// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + } + subscribeClient = &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. +func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, metaClient, req) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. 
+func systemInfoSuggestsGCE() bool {
+	if runtime.GOOS != "linux" {
+		// We don't have any non-Linux clues available, at least yet.
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 00000000000..af39a91e703 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 00000000000..2d1d72608c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). 
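The metadata package vendored above reduces to a handful of string getters plus the OnGCE probe. A minimal usage sketch, not part of the vendored diff, with a hypothetical attribute name ("startup-config") and abbreviated error handling:

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/compute/metadata"
	)

	func main() {
		// OnGCE races a DNS lookup against an HTTP probe of the fixed
		// metadata IP (see testOnGCE above) and caches the verdict.
		if !metadata.OnGCE() {
			log.Fatal("not running on GCE")
		}
		proj, err := metadata.ProjectID() // cached after the first fetch
		if err != nil {
			log.Fatal(err)
		}
		zone, err := metadata.Zone()
		if err != nil {
			log.Fatal(err)
		}
		// Missing values surface as NotDefinedError, which callers can
		// distinguish from transport failures with a type assertion.
		cfg, err := metadata.InstanceAttributeValue("startup-config")
		if _, notDefined := err.(metadata.NotDefinedError); notDefined {
			cfg = "" // attribute simply absent
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(proj, zone, cfg)
	}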
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go
new file mode 100755
index 00000000000..0870a03ded6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go
@@ -0,0 +1,931 @@
+package storage
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// AccountsClient is the Azure Storage Management API.
+type AccountsClient struct {
+	ManagementClient
+}
+
+// NewAccountsClient creates an instance of the AccountsClient client.
+func NewAccountsClient(subscriptionID string) AccountsClient {
+	return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client.
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
+	return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CheckNameAvailability checks that the storage account name is valid and is not already in use.
+//
+// accountName is the name of the storage account within the specified resource group. Storage account names must be
+// between 3 and 24 characters in length and use numbers and lower-case letters only.
+func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil},
+				{Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "CheckNameAvailability")
+	}
+
+	req, err := client.CheckNameAvailabilityPreparer(accountName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.CheckNameAvailabilitySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.CheckNameAvailabilityResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
+func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), + autorest.WithJSON(accountName), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create asynchronously creates a new storage account with the specified parameters. If an account is already created +// and a subsequent create request is issued with different properties, the account properties will be updated. If an +// account is already created and a subsequent create or update request is issued with the exact same set of +// properties, the request will succeed. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to +// provide for the created account. 
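The channel contract described in the Create doc above is worth spelling out: both returned channels are buffered, the error channel receives at most one value before being closed, and closing the cancel channel aborts polling. A hypothetical caller, assuming the generated model fields for this API version (Sku, Kind, Location) and omitting authorizer setup:

	import (
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/arm/storage"
		"github.com/Azure/go-autorest/autorest/to"
	)

	func createExample(client storage.AccountsClient) {
		cancel := make(chan struct{}) // close(cancel) aborts polling
		acctCh, errCh := client.Create("example-rg", "examplestorageacct",
			storage.AccountCreateParameters{
				Sku:      &storage.Sku{Name: storage.StandardLRS},
				Kind:     storage.Storage,
				Location: to.StringPtr("westus"),
			}, cancel)

		// A nil read here means success: the error channel is closed
		// without ever receiving a value unless the operation failed.
		if err := <-errCh; err != nil {
			log.Fatal(err)
		}
		acct := <-acctCh
		fmt.Println("provisioned:", to.String(acct.Name))
	}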
+func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (<-chan Account, <-chan error) { + resultChan := make(chan Account, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Identity", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Identity.Type", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Account + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreatePreparer prepares the Create request. 
+func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a storage account in Microsoft Azure. +// +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Delete") + } + + req, err := client.DeletePreparer(resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name, +// location, and account status. The ListKeys operation should be used to retrieve storage keys. +// +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. 
Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "GetProperties") + } + + req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. +func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use +// the ListKeys operation for this. 
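Reusing the hypothetical client and imports from the Create sketch earlier, subscription-wide listing is a single call; per this generated model, AccountListResult carries the accounts as a *[]Account in its Value field:

	accounts, err := client.List()
	if err != nil {
		log.Fatal(err)
	}
	if accounts.Value != nil {
		for _, acct := range *accounts.Value {
			fmt.Println(to.String(acct.Name), "in", to.String(acct.Location))
		}
	}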
+func (client AccountsClient) List() (result AccountListResult, err error) {
+	req, err := client.ListPreparer()
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client AccountsClient) ListPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2017-06-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAccountSAS lists SAS credentials of a storage account.
+//
+// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
+// accountName is the name of the storage account within the specified resource group. Storage account names must be
+// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to
+// provide to list SAS credentials for the storage account.
+func (client AccountsClient) ListAccountSAS(resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListAccountSAS") + } + + req, err := client.ListAccountSASPreparer(resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request") + return + } + + resp, err := client.ListAccountSASSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request") + return + } + + result, err = client.ListAccountSASResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request") + } + + return +} + +// ListAccountSASPreparer prepares the ListAccountSAS request. +func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAccountSASSender sends the ListAccountSAS request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys +// are not returned; use the ListKeys operation for this. +// +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListByResourceGroup") + } + + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListKeys lists the access keys for the specified storage account. +// +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListKeys") + } + + req, err := client.ListKeysPreparer(resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") + return + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") + return + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. 
The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListServiceSAS lists service SAS credentials of a specific resource.
+//
+// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
+// accountName is the name of the storage account within the specified resource group. Storage account names must be
+// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to
+// provide to list service SAS credentials.
+func (client AccountsClient) ListServiceSAS(resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
+		{TargetValue: accountName,
+			Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
+				{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil},
+				{Target: "parameters.Identifier", Name: validation.Null, Rule: false,
+					Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListServiceSAS")
+	}
+
+	req, err := client.ListServiceSASPreparer(resourceGroupName, accountName, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListServiceSASSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ListServiceSASResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListServiceSASPreparer prepares the ListServiceSAS request.
+func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       autorest.Encode("path", accountName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2017-06-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListServiceSASSender sends the ListServiceSAS request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// RegenerateKey regenerates one of the access keys for the specified storage account.
+//
+// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
+// accountName is the name of the storage account within the specified resource group. Storage account names must be
+// between 3 and 24 characters in length and use numbers and lower-case letters only. regenerateKey specifies the name
+// of the key which should be regenerated -- key1 or key2.
+func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: regenerateKey, + Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "RegenerateKey") + } + + req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + return + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") + return + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. +func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), + autorest.WithJSON(regenerateKey), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. 
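A hypothetical key-rotation sketch for the RegenerateKey call above, again reusing the client and imports from the earlier Create sketch; like ListKeys, the response carries the full key list (assumed here to be Keys *[]AccountKey, per this generated model):

	keys, err := client.RegenerateKey("example-rg", "examplestorageacct",
		storage.AccountRegenerateKeyParameters{KeyName: to.StringPtr("key2")})
	if err != nil {
		log.Fatal(err)
	}
	if keys.Keys != nil {
		for _, k := range *keys.Keys {
			fmt.Printf("%s: %s\n", to.String(k.KeyName), k.Permissions)
		}
	}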
+func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account. +// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account; +// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value +// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This +// call does not change the storage keys for the account. If you want to change the storage account keys, use the +// regenerate keys operation. The location and name of the storage account cannot be changed after creation. +// +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to +// provide for the updated account. +func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Update") + } + + req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go new file mode 100755 index 00000000000..133386ddbeb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -0,0 +1,51 @@ +// Package storage implements the Azure ARM Storage service API version 2017-06-01. +// +// The Azure Storage Management API. +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Storage + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Storage. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. 
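The NewWithBaseURI body follows. A short sketch of why the base-URI variant exists: the same generated client can be pointed at a sovereign-cloud ARM endpoint (the China endpoint below is shown purely as an illustration):

``` go
package main

import "github.com/Azure/azure-sdk-for-go/arm/storage"

func main() {
	// Public cloud: equivalent to storage.New(subscriptionID).
	pub := storage.NewWithBaseURI(storage.DefaultBaseURI, "00000000-0000-0000-0000-000000000000")

	// Sovereign cloud: same generated client, different ARM endpoint.
	cn := storage.NewWithBaseURI("https://management.chinacloudapi.cn", "00000000-0000-0000-0000-000000000000")

	_, _ = pub, cn
}
```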
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go new file mode 100755 index 00000000000..37c1679ee84 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go @@ -0,0 +1,602 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +// AccessTier enumerates the values for access tier. +type AccessTier string + +const ( + // Cool specifies the cool state for access tier. + Cool AccessTier = "Cool" + // Hot specifies the hot state for access tier. + Hot AccessTier = "Hot" +) + +// AccountStatus enumerates the values for account status. +type AccountStatus string + +const ( + // Available specifies the available state for account status. + Available AccountStatus = "available" + // Unavailable specifies the unavailable state for account status. + Unavailable AccountStatus = "unavailable" +) + +// Action enumerates the values for action. +type Action string + +const ( + // Allow specifies the allow state for action. + Allow Action = "Allow" +) + +// Bypass enumerates the values for bypass. +type Bypass string + +const ( + // AzureServices specifies the azure services state for bypass. + AzureServices Bypass = "AzureServices" + // Logging specifies the logging state for bypass. + Logging Bypass = "Logging" + // Metrics specifies the metrics state for bypass. + Metrics Bypass = "Metrics" + // None specifies the none state for bypass. + None Bypass = "None" +) + +// DefaultAction enumerates the values for default action. +type DefaultAction string + +const ( + // DefaultActionAllow specifies the default action allow state for default action. + DefaultActionAllow DefaultAction = "Allow" + // DefaultActionDeny specifies the default action deny state for default action. + DefaultActionDeny DefaultAction = "Deny" +) + +// HTTPProtocol enumerates the values for http protocol. +type HTTPProtocol string + +const ( + // HTTPS specifies the https state for http protocol. + HTTPS HTTPProtocol = "https" + // Httpshttp specifies the httpshttp state for http protocol. + Httpshttp HTTPProtocol = "https,http" +) + +// KeyPermission enumerates the values for key permission. +type KeyPermission string + +const ( + // Full specifies the full state for key permission. + Full KeyPermission = "Full" + // Read specifies the read state for key permission. + Read KeyPermission = "Read" +) + +// KeySource enumerates the values for key source. 
+type KeySource string + +const ( + // MicrosoftKeyvault specifies the microsoft keyvault state for key source. + MicrosoftKeyvault KeySource = "Microsoft.Keyvault" + // MicrosoftStorage specifies the microsoft storage state for key source. + MicrosoftStorage KeySource = "Microsoft.Storage" +) + +// Kind enumerates the values for kind. +type Kind string + +const ( + // BlobStorage specifies the blob storage state for kind. + BlobStorage Kind = "BlobStorage" + // Storage specifies the storage state for kind. + Storage Kind = "Storage" +) + +// Permissions enumerates the values for permissions. +type Permissions string + +const ( + // A specifies the a state for permissions. + A Permissions = "a" + // C specifies the c state for permissions. + C Permissions = "c" + // D specifies the d state for permissions. + D Permissions = "d" + // L specifies the l state for permissions. + L Permissions = "l" + // P specifies the p state for permissions. + P Permissions = "p" + // R specifies the r state for permissions. + R Permissions = "r" + // U specifies the u state for permissions. + U Permissions = "u" + // W specifies the w state for permissions. + W Permissions = "w" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating specifies the creating state for provisioning state. + Creating ProvisioningState = "Creating" + // ResolvingDNS specifies the resolving dns state for provisioning state. + ResolvingDNS ProvisioningState = "ResolvingDNS" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" +) + +// Reason enumerates the values for reason. +type Reason string + +const ( + // AccountNameInvalid specifies the account name invalid state for reason. + AccountNameInvalid Reason = "AccountNameInvalid" + // AlreadyExists specifies the already exists state for reason. + AlreadyExists Reason = "AlreadyExists" +) + +// ReasonCode enumerates the values for reason code. +type ReasonCode string + +const ( + // NotAvailableForSubscription specifies the not available for subscription state for reason code. + NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" + // QuotaID specifies the quota id state for reason code. + QuotaID ReasonCode = "QuotaId" +) + +// Services enumerates the values for services. +type Services string + +const ( + // B specifies the b state for services. + B Services = "b" + // F specifies the f state for services. + F Services = "f" + // Q specifies the q state for services. + Q Services = "q" + // T specifies the t state for services. + T Services = "t" +) + +// SignedResource enumerates the values for signed resource. +type SignedResource string + +const ( + // SignedResourceB specifies the signed resource b state for signed resource. + SignedResourceB SignedResource = "b" + // SignedResourceC specifies the signed resource c state for signed resource. + SignedResourceC SignedResource = "c" + // SignedResourceF specifies the signed resource f state for signed resource. + SignedResourceF SignedResource = "f" + // SignedResourceS specifies the signed resource s state for signed resource. + SignedResourceS SignedResource = "s" +) + +// SignedResourceTypes enumerates the values for signed resource types. +type SignedResourceTypes string + +const ( + // SignedResourceTypesC specifies the signed resource types c state for signed resource types. 
+    SignedResourceTypesC SignedResourceTypes = "c"
+    // SignedResourceTypesO specifies the signed resource types o state for signed resource types.
+    SignedResourceTypesO SignedResourceTypes = "o"
+    // SignedResourceTypesS specifies the signed resource types s state for signed resource types.
+    SignedResourceTypesS SignedResourceTypes = "s"
+)
+
+// SkuName enumerates the values for sku name.
+type SkuName string
+
+const (
+    // PremiumLRS specifies the premium lrs state for sku name.
+    PremiumLRS SkuName = "Premium_LRS"
+    // StandardGRS specifies the standard grs state for sku name.
+    StandardGRS SkuName = "Standard_GRS"
+    // StandardLRS specifies the standard lrs state for sku name.
+    StandardLRS SkuName = "Standard_LRS"
+    // StandardRAGRS specifies the standard ragrs state for sku name.
+    StandardRAGRS SkuName = "Standard_RAGRS"
+    // StandardZRS specifies the standard zrs state for sku name.
+    StandardZRS SkuName = "Standard_ZRS"
+)
+
+// SkuTier enumerates the values for sku tier.
+type SkuTier string
+
+const (
+    // Premium specifies the premium state for sku tier.
+    Premium SkuTier = "Premium"
+    // Standard specifies the standard state for sku tier.
+    Standard SkuTier = "Standard"
+)
+
+// State enumerates the values for state.
+type State string
+
+const (
+    // StateDeprovisioning specifies the state deprovisioning state for state.
+    StateDeprovisioning State = "deprovisioning"
+    // StateFailed specifies the state failed state for state.
+    StateFailed State = "failed"
+    // StateNetworkSourceDeleted specifies the state network source deleted state for state.
+    StateNetworkSourceDeleted State = "networkSourceDeleted"
+    // StateProvisioning specifies the state provisioning state for state.
+    StateProvisioning State = "provisioning"
+    // StateSucceeded specifies the state succeeded state for state.
+    StateSucceeded State = "succeeded"
+)
+
+// UsageUnit enumerates the values for usage unit.
+type UsageUnit string
+
+const (
+    // Bytes specifies the bytes state for usage unit.
+    Bytes UsageUnit = "Bytes"
+    // BytesPerSecond specifies the bytes per second state for usage unit.
+    BytesPerSecond UsageUnit = "BytesPerSecond"
+    // Count specifies the count state for usage unit.
+    Count UsageUnit = "Count"
+    // CountsPerSecond specifies the counts per second state for usage unit.
+    CountsPerSecond UsageUnit = "CountsPerSecond"
+    // Percent specifies the percent state for usage unit.
+    Percent UsageUnit = "Percent"
+    // Seconds specifies the seconds state for usage unit.
+    Seconds UsageUnit = "Seconds"
+)
+
+// Account is the storage account.
+type Account struct {
+    autorest.Response  `json:"-"`
+    ID                 *string             `json:"id,omitempty"`
+    Name               *string             `json:"name,omitempty"`
+    Type               *string             `json:"type,omitempty"`
+    Location           *string             `json:"location,omitempty"`
+    Tags               *map[string]*string `json:"tags,omitempty"`
+    Sku                *Sku                `json:"sku,omitempty"`
+    Kind               Kind                `json:"kind,omitempty"`
+    Identity           *Identity           `json:"identity,omitempty"`
+    *AccountProperties `json:"properties,omitempty"`
+}
+
+// AccountCheckNameAvailabilityParameters is the parameters used to check the availability of the storage account name.
+type AccountCheckNameAvailabilityParameters struct {
+    Name *string `json:"name,omitempty"`
+    Type *string `json:"type,omitempty"`
+}
+
+// AccountCreateParameters is the parameters used when creating a storage account.
+type AccountCreateParameters struct { + Sku *Sku `json:"sku,omitempty"` + Kind Kind `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Identity *Identity `json:"identity,omitempty"` + *AccountPropertiesCreateParameters `json:"properties,omitempty"` +} + +// AccountKey is an access key for the storage account. +type AccountKey struct { + KeyName *string `json:"keyName,omitempty"` + Value *string `json:"value,omitempty"` + Permissions KeyPermission `json:"permissions,omitempty"` +} + +// AccountListKeysResult is the response from the ListKeys operation. +type AccountListKeysResult struct { + autorest.Response `json:"-"` + Keys *[]AccountKey `json:"keys,omitempty"` +} + +// AccountListResult is the response from the List Storage Accounts operation. +type AccountListResult struct { + autorest.Response `json:"-"` + Value *[]Account `json:"value,omitempty"` +} + +// AccountProperties is properties of the storage account. +type AccountProperties struct { + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` + PrimaryLocation *string `json:"primaryLocation,omitempty"` + StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` + LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` + SecondaryLocation *string `json:"secondaryLocation,omitempty"` + StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` + CreationTime *date.Time `json:"creationTime,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` +} + +// AccountPropertiesCreateParameters is the parameters used to create the storage account. +type AccountPropertiesCreateParameters struct { + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` +} + +// AccountPropertiesUpdateParameters is the parameters used when updating a storage account. +type AccountPropertiesUpdateParameters struct { + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` +} + +// AccountRegenerateKeyParameters is the parameters used to regenerate the storage account key. +type AccountRegenerateKeyParameters struct { + KeyName *string `json:"keyName,omitempty"` +} + +// AccountSasParameters is the parameters to list SAS credentials of a storage account. 
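The AccountSasParameters struct follows. First, a sketch of how the create-parameter types above compose; the local pointer helpers are stand-ins (the real provider code uses `github.com/Azure/go-autorest/autorest/to`), and the assembled value would be passed to the Create operation defined earlier in accounts.go, outside this excerpt:

``` go
package main

import "github.com/Azure/azure-sdk-for-go/arm/storage"

func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

func main() {
	tags := map[string]*string{"environment": strPtr("dev")}

	params := storage.AccountCreateParameters{
		Sku:      &storage.Sku{Name: storage.StandardLRS, Tier: storage.Standard},
		Kind:     storage.BlobStorage,
		Location: strPtr("westus"),
		Tags:     &tags,
		// The embedded properties struct carries the networkAcls/accessTier payload.
		AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
			AccessTier:             storage.Hot,
			EnableHTTPSTrafficOnly: boolPtr(true),
		},
	}
	_ = params // would be passed to AccountsClient.Create
}
```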
+type AccountSasParameters struct {
+    Services               Services            `json:"signedServices,omitempty"`
+    ResourceTypes          SignedResourceTypes `json:"signedResourceTypes,omitempty"`
+    Permissions            Permissions         `json:"signedPermission,omitempty"`
+    IPAddressOrRange       *string             `json:"signedIp,omitempty"`
+    Protocols              HTTPProtocol        `json:"signedProtocol,omitempty"`
+    SharedAccessStartTime  *date.Time          `json:"signedStart,omitempty"`
+    SharedAccessExpiryTime *date.Time          `json:"signedExpiry,omitempty"`
+    KeyToSign              *string             `json:"keyToSign,omitempty"`
+}
+
+// AccountUpdateParameters is the parameters that can be provided when updating the storage account properties.
+type AccountUpdateParameters struct {
+    Sku      *Sku                `json:"sku,omitempty"`
+    Tags     *map[string]*string `json:"tags,omitempty"`
+    Identity *Identity           `json:"identity,omitempty"`
+    *AccountPropertiesUpdateParameters `json:"properties,omitempty"`
+}
+
+// CheckNameAvailabilityResult is the CheckNameAvailability operation response.
+type CheckNameAvailabilityResult struct {
+    autorest.Response `json:"-"`
+    NameAvailable *bool   `json:"nameAvailable,omitempty"`
+    Reason        Reason  `json:"reason,omitempty"`
+    Message       *string `json:"message,omitempty"`
+}
+
+// CustomDomain is the custom domain assigned to this storage account. This can be set via Update.
+type CustomDomain struct {
+    Name         *string `json:"name,omitempty"`
+    UseSubDomain *bool   `json:"useSubDomain,omitempty"`
+}
+
+// Dimension is a dimension of blobs, possibly blob type or access tier.
+type Dimension struct {
+    Name        *string `json:"name,omitempty"`
+    DisplayName *string `json:"displayName,omitempty"`
+}
+
+// Encryption is the encryption settings on the storage account.
+type Encryption struct {
+    Services           *EncryptionServices `json:"services,omitempty"`
+    KeySource          KeySource           `json:"keySource,omitempty"`
+    KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"`
+}
+
+// EncryptionService is a service that allows server-side encryption to be used.
+type EncryptionService struct {
+    Enabled         *bool      `json:"enabled,omitempty"`
+    LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"`
+}
+
+// EncryptionServices is a list of services that support encryption.
+type EncryptionServices struct {
+    Blob  *EncryptionService `json:"blob,omitempty"`
+    File  *EncryptionService `json:"file,omitempty"`
+    Table *EncryptionService `json:"table,omitempty"`
+    Queue *EncryptionService `json:"queue,omitempty"`
+}
+
+// Endpoints is the URIs that are used to perform a retrieval of a public blob, queue, or table object.
+type Endpoints struct {
+    Blob  *string `json:"blob,omitempty"`
+    Queue *string `json:"queue,omitempty"`
+    Table *string `json:"table,omitempty"`
+    File  *string `json:"file,omitempty"`
+}
+
+// Identity is the identity for the resource.
+type Identity struct {
+    PrincipalID *string `json:"principalId,omitempty"`
+    TenantID    *string `json:"tenantId,omitempty"`
+    Type        *string `json:"type,omitempty"`
+}
+
+// IPRule is an IP rule with a specific IP or IP range in CIDR format.
+type IPRule struct {
+    IPAddressOrRange *string `json:"value,omitempty"`
+    Action           Action  `json:"action,omitempty"`
+}
+
+// KeyVaultProperties is properties of key vault.
+type KeyVaultProperties struct {
+    KeyName     *string `json:"keyname,omitempty"`
+    KeyVersion  *string `json:"keyversion,omitempty"`
+    KeyVaultURI *string `json:"keyvaulturi,omitempty"`
+}
+
+// ListAccountSasResponse is the List SAS credentials operation response.
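That response struct is defined just below. A sketch of filling AccountSasParameters with the enum values defined earlier (the single-letter service/permission constants above); the composite ListAccountSAS operation itself lives earlier in accounts.go, outside this excerpt:

``` go
package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	start := date.Time{Time: time.Now().UTC()}
	expiry := date.Time{Time: time.Now().UTC().Add(48 * time.Hour)}

	sas := storage.AccountSasParameters{
		Services:               storage.B,                    // blob service
		ResourceTypes:          storage.SignedResourceTypesC, // container level
		Permissions:            storage.R,                    // read-only
		Protocols:              storage.HTTPS,
		SharedAccessStartTime:  &start,
		SharedAccessExpiryTime: &expiry,
	}
	_ = sas // would be passed to the ListAccountSAS operation
}
```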
+type ListAccountSasResponse struct {
+    autorest.Response `json:"-"`
+    AccountSasToken *string `json:"accountSasToken,omitempty"`
+}
+
+// ListServiceSasResponse is the List service SAS credentials operation response.
+type ListServiceSasResponse struct {
+    autorest.Response `json:"-"`
+    ServiceSasToken *string `json:"serviceSasToken,omitempty"`
+}
+
+// MetricSpecification is the metric specification of an operation.
+type MetricSpecification struct {
+    Name                            *string      `json:"name,omitempty"`
+    DisplayName                     *string      `json:"displayName,omitempty"`
+    DisplayDescription              *string      `json:"displayDescription,omitempty"`
+    Unit                            *string      `json:"unit,omitempty"`
+    Dimensions                      *[]Dimension `json:"dimensions,omitempty"`
+    AggregationType                 *string      `json:"aggregationType,omitempty"`
+    FillGapWithZero                 *bool        `json:"fillGapWithZero,omitempty"`
+    Category                        *string      `json:"category,omitempty"`
+    ResourceIDDimensionNameOverride *string      `json:"resourceIdDimensionNameOverride,omitempty"`
+}
+
+// NetworkRuleSet is a network rule set.
+type NetworkRuleSet struct {
+    Bypass              Bypass                `json:"bypass,omitempty"`
+    VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"`
+    IPRules             *[]IPRule             `json:"ipRules,omitempty"`
+    DefaultAction       DefaultAction         `json:"defaultAction,omitempty"`
+}
+
+// Operation is a storage REST API operation definition.
+type Operation struct {
+    Name    *string           `json:"name,omitempty"`
+    Display *OperationDisplay `json:"display,omitempty"`
+    Origin  *string           `json:"origin,omitempty"`
+    *OperationProperties `json:"properties,omitempty"`
+}
+
+// OperationDisplay is display metadata associated with the operation.
+type OperationDisplay struct {
+    Provider  *string `json:"provider,omitempty"`
+    Resource  *string `json:"resource,omitempty"`
+    Operation *string `json:"operation,omitempty"`
+}
+
+// OperationListResult is the result of the request to list Storage operations. It contains a list of operations and a URL
+// link to get the next set of results.
+type OperationListResult struct {
+    autorest.Response `json:"-"`
+    Value *[]Operation `json:"value,omitempty"`
+}
+
+// OperationProperties is the properties of an operation, including metric specifications.
+type OperationProperties struct {
+    ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"`
+}
+
+// Resource describes a storage resource.
+type Resource struct {
+    ID       *string             `json:"id,omitempty"`
+    Name     *string             `json:"name,omitempty"`
+    Type     *string             `json:"type,omitempty"`
+    Location *string             `json:"location,omitempty"`
+    Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// Restriction is the restriction because of which the SKU cannot be used.
+type Restriction struct {
+    Type       *string    `json:"type,omitempty"`
+    Values     *[]string  `json:"values,omitempty"`
+    ReasonCode ReasonCode `json:"reasonCode,omitempty"`
+}
+
+// ServiceSasParameters is the parameters to list service SAS credentials of a specific resource.
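The ServiceSasParameters struct follows. As a sketch of composing the network-rule types above (IPRule from earlier in this file, VirtualNetworkRule from just below); the resource ID is a placeholder and the helper is a local stand-in:

``` go
package main

import "github.com/Azure/azure-sdk-for-go/arm/storage"

func strPtr(s string) *string { return &s }

func main() {
	ips := []storage.IPRule{
		{IPAddressOrRange: strPtr("203.0.113.0/24"), Action: storage.Allow},
	}
	vnets := []storage.VirtualNetworkRule{
		{VirtualNetworkResourceID: strPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"), Action: storage.Allow},
	}

	acls := storage.NetworkRuleSet{
		Bypass:              storage.AzureServices, // let trusted Azure services through
		IPRules:             &ips,
		VirtualNetworkRules: &vnets,
		DefaultAction:       storage.DefaultActionDeny,
	}
	_ = acls // carried on the Account*Parameters structs as the networkAcls property
}
```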
+type ServiceSasParameters struct {
+    CanonicalizedResource  *string        `json:"canonicalizedResource,omitempty"`
+    Resource               SignedResource `json:"signedResource,omitempty"`
+    Permissions            Permissions    `json:"signedPermission,omitempty"`
+    IPAddressOrRange       *string        `json:"signedIp,omitempty"`
+    Protocols              HTTPProtocol   `json:"signedProtocol,omitempty"`
+    SharedAccessStartTime  *date.Time     `json:"signedStart,omitempty"`
+    SharedAccessExpiryTime *date.Time     `json:"signedExpiry,omitempty"`
+    Identifier             *string        `json:"signedIdentifier,omitempty"`
+    PartitionKeyStart      *string        `json:"startPk,omitempty"`
+    PartitionKeyEnd        *string        `json:"endPk,omitempty"`
+    RowKeyStart            *string        `json:"startRk,omitempty"`
+    RowKeyEnd              *string        `json:"endRk,omitempty"`
+    KeyToSign              *string        `json:"keyToSign,omitempty"`
+    CacheControl           *string        `json:"rscc,omitempty"`
+    ContentDisposition     *string        `json:"rscd,omitempty"`
+    ContentEncoding        *string        `json:"rsce,omitempty"`
+    ContentLanguage        *string        `json:"rscl,omitempty"`
+    ContentType            *string        `json:"rsct,omitempty"`
+}
+
+// ServiceSpecification is one property of an operation, including metric specifications.
+type ServiceSpecification struct {
+    MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"`
+}
+
+// Sku is the SKU of the storage account.
+type Sku struct {
+    Name         SkuName          `json:"name,omitempty"`
+    Tier         SkuTier          `json:"tier,omitempty"`
+    ResourceType *string          `json:"resourceType,omitempty"`
+    Kind         Kind             `json:"kind,omitempty"`
+    Locations    *[]string        `json:"locations,omitempty"`
+    Capabilities *[]SKUCapability `json:"capabilities,omitempty"`
+    Restrictions *[]Restriction   `json:"restrictions,omitempty"`
+}
+
+// SKUCapability is the capability information in the specified sku, including file encryption, network acls, change
+// notification, etc.
+type SKUCapability struct {
+    Name  *string `json:"name,omitempty"`
+    Value *string `json:"value,omitempty"`
+}
+
+// SkuListResult is the response from the List Storage SKUs operation.
+type SkuListResult struct {
+    autorest.Response `json:"-"`
+    Value *[]Sku `json:"value,omitempty"`
+}
+
+// Usage describes Storage Resource Usage.
+type Usage struct {
+    Unit         UsageUnit  `json:"unit,omitempty"`
+    CurrentValue *int32     `json:"currentValue,omitempty"`
+    Limit        *int32     `json:"limit,omitempty"`
+    Name         *UsageName `json:"name,omitempty"`
+}
+
+// UsageListResult is the response from the List Usages operation.
+type UsageListResult struct {
+    autorest.Response `json:"-"`
+    Value *[]Usage `json:"value,omitempty"`
+}
+
+// UsageName is the usage names that can be used; currently limited to StorageAccount.
+type UsageName struct {
+    Value          *string `json:"value,omitempty"`
+    LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// VirtualNetworkRule is a virtual network rule.
+type VirtualNetworkRule struct {
+    VirtualNetworkResourceID *string `json:"id,omitempty"`
+    Action                   Action  `json:"action,omitempty"`
+    State                    State   `json:"state,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go
new file mode 100644
index 00000000000..cc46c699792
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go
@@ -0,0 +1,96 @@
+package storage
+
+// Copyright (c) Microsoft and contributors.  All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/azure"
+    "net/http"
+)
+
+// OperationsClient is the Azure Storage Management API.
+type OperationsClient struct {
+    ManagementClient
+}
+
+// NewOperationsClient creates an instance of the OperationsClient client.
+func NewOperationsClient(subscriptionID string) OperationsClient {
+    return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
+    return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists all of the available Storage REST API operations.
+func (client OperationsClient) List() (result OperationListResult, err error) {
+    req, err := client.ListPreparer()
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request")
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request")
+        return
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request")
+    }
+
+    return
+}
+
+// ListPreparer prepares the List request.
+func (client OperationsClient) ListPreparer() (*http.Request, error) {
+    const APIVersion = "2017-06-01"
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    preparer := autorest.CreatePreparer(
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/providers/Microsoft.Storage/operations"),
+        autorest.WithQueryParameters(queryParameters))
+    return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
+    return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
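The ListResponder implementation follows. A usage sketch for the operations listing above; the authorizer is a placeholder and the printed operation name is only an example of the format ARM returns:

``` go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := storage.NewOperationsClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = autorest.NullAuthorizer{} // real code needs a bearer authorizer

	ops, err := client.List()
	if err != nil {
		panic(err)
	}
	for _, op := range *ops.Value {
		fmt.Println(*op.Name) // e.g. "Microsoft.Storage/storageAccounts/read"
	}
}
```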
+func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go
new file mode 100644
index 00000000000..94d4d6f83ec
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go
@@ -0,0 +1,100 @@
+package storage
+
+// Copyright (c) Microsoft and contributors.  All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/azure"
+    "net/http"
+)
+
+// SkusClient is the Azure Storage Management API.
+type SkusClient struct {
+    ManagementClient
+}
+
+// NewSkusClient creates an instance of the SkusClient client.
+func NewSkusClient(subscriptionID string) SkusClient {
+    return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSkusClientWithBaseURI creates an instance of the SkusClient client.
+func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
+    return SkusClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists the available SKUs supported by Microsoft.Storage for the given subscription.
+func (client SkusClient) List() (result SkuListResult, err error) {
+    req, err := client.ListPreparer()
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request")
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request")
+        return
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request")
+    }
+
+    return
+}
+
+// ListPreparer prepares the List request.
+func (client SkusClient) ListPreparer() (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+    }
+
+    const APIVersion = "2017-06-01"
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    preparer := autorest.CreatePreparer(
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+    return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) {
+    return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go
new file mode 100755
index 00000000000..682e5c16c36
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go
@@ -0,0 +1,100 @@
+package storage
+
+// Copyright (c) Microsoft and contributors.  All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/azure"
+    "net/http"
+)
+
+// UsageClient is the Azure Storage Management API.
+type UsageClient struct {
+    ManagementClient
+}
+
+// NewUsageClient creates an instance of the UsageClient client.
+func NewUsageClient(subscriptionID string) UsageClient {
+    return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsageClientWithBaseURI creates an instance of the UsageClient client.
+func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient {
+    return UsageClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the current usage count and the limit for the resources under the subscription.
+func (client UsageClient) List() (result UsageListResult, err error) {
+    req, err := client.ListPreparer()
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", nil, "Failure preparing request")
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure sending request")
+        return
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure responding to request")
+    }
+
+    return
+}
+
+// ListPreparer prepares the List request.
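The preparer's body follows. A sketch of checking remaining storage-account quota with the usage client above (placeholder subscription and authorizer):

``` go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest"
)

func main() {
	client := storage.NewUsageClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = autorest.NullAuthorizer{} // placeholder authorizer

	usages, err := client.List()
	if err != nil {
		panic(err)
	}
	for _, u := range *usages.Value {
		// UsageName is currently limited to StorageAccount (see models.go).
		fmt.Printf("%s: %d of %d %s\n", *u.Name.Value, *u.CurrentValue, *u.Limit, u.Unit)
	}
}
```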
+func (client UsageClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go new file mode 100755 index 00000000000..467102d5973 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -0,0 +1,28 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/v11.0.0-beta arm-storage/2017-06-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "v11.0.0-beta" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 00000000000..6dc348e02af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,73 @@ +# Azure Storage SDK for Go + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform REST operations against the [Azure Storage Service](https://docs.microsoft.com/en-us/azure/storage/). To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/arm/storage) package. 
For your classic storage accounts (Azure Service Management / ASM), use the [github.com/Azure/azure-sdk-for-go/management/storageservice](https://github.com/Azure/azure-sdk-for-go/tree/master/management/storageservice) package.
+
+This package includes support for the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
+
+# Getting Started
+
+ 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for-go/storage`
+ 1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account).
+ - Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library.
+ - This option is production ready, but can also be used for development.
+ 1. (Optional, Windows only) Download and start the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
+ 1. Check out our existing [samples](https://github.com/Azure-Samples?q=Storage&language=go).
+
+# Contributing
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+When contributing, please conform to the following practices:
+ - Run [gofmt](https://golang.org/cmd/gofmt/) to use standard Go formatting.
+ - Run [golint](https://github.com/golang/lint) to conform to standard naming conventions.
+ - Run [go vet](https://golang.org/cmd/vet/) to catch common Go mistakes.
+ - Use [GoASTScanner/gas](https://github.com/GoASTScanner/gas) to ensure there are no common security violations in your contribution.
+ - Run [go test](https://golang.org/cmd/go/#hdr-Test_packages) to catch possible bugs in the code: `go test ./storage/...`.
+ - This project uses HTTP recordings for testing.
+ - The recorder should be attached to the client before calling the functions to test and later stopped.
+ - If you updated an existing test, its recording might need to be updated. Run `go test ./storage/... -ow -check.f TestName` to rerecord the test.
+ - Important note: all HTTP requests in the recording must be unique: different bodies, headers (`User-Agent`, `Authorization` and `Date` or `x-ms-date` headers are ignored), URLs and methods. As opposed to the example above, the following test is not suitable for recording:
+
+``` go
+func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
+	cli := getQueueClient(c)
+	rec := cli.client.appendRecorder(c)
+	defer rec.Stop()
+
+	queue := cli.GetQueueReference(queueName(c))
+	ok, err := queue.Exists()
+	c.Assert(err, chk.IsNil)
+	c.Assert(ok, chk.Equals, false)
+
+	c.Assert(queue.Create(nil), chk.IsNil)
+	defer queue.Delete(nil)
+
+	ok, err = queue.Exists() // This is the very same request as the one 5 lines above.
+	// The test replayer gets confused and the test fails in the last line.
+	c.Assert(err, chk.IsNil)
+	c.Assert(ok, chk.Equals, true)
+}
+```
+
+ - On the other hand, this test does not repeat requests: the URLs are different.
+
+``` go
+func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
+	cli := getQueueClient(c)
+	rec := cli.client.appendRecorder(c)
+	defer rec.Stop()
+
+	queue1 := cli.GetQueueReference(queueName(c, "nonexistent"))
+	ok, err := queue1.Exists()
+	c.Assert(err, chk.IsNil)
+	c.Assert(ok, chk.Equals, false)
+
+	queue2 := cli.GetQueueReference(queueName(c, "existing"))
+	c.Assert(queue2.Create(nil), chk.IsNil)
+	defer queue2.Delete(nil)
+
+	ok, err = queue2.Exists()
+	c.Assert(err, chk.IsNil)
+	c.Assert(ok, chk.Equals, true)
+}
+```
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
new file mode 100644
index 00000000000..8b5b96d4884
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
@@ -0,0 +1,91 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "bytes"
+    "crypto/md5"
+    "encoding/base64"
+    "fmt"
+    "net/http"
+    "net/url"
+    "time"
+)
+
+// PutAppendBlob initializes an empty append blob with the specified name. An
+// append blob must be created using this method before appending blocks.
+//
+// See CreateBlockBlobFromReader for more info on creating blobs.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
+func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
+    params := url.Values{}
+    headers := b.Container.bsc.client.getStandardHeaders()
+    headers["x-ms-blob-type"] = string(BlobTypeAppend)
+    headers = mergeHeaders(headers, headersFromStruct(b.Properties))
+    headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+    if options != nil {
+        params = addTimeout(params, options.Timeout)
+        headers = mergeHeaders(headers, headersFromStruct(*options))
+    }
+    uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+    resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+    if err != nil {
+        return err
+    }
+    return b.respondCreation(resp, BlobTypeAppend)
+}
+
+// AppendBlockOptions includes the options for an append block operation
+type AppendBlockOptions struct {
+    Timeout           uint
+    LeaseID           string     `header:"x-ms-lease-id"`
+    MaxSize           *uint      `header:"x-ms-blob-condition-maxsize"`
+    AppendPosition    *uint      `header:"x-ms-blob-condition-appendpos"`
+    IfModifiedSince   *time.Time `header:"If-Modified-Since"`
+    IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+    IfMatch           string     `header:"If-Match"`
+    IfNoneMatch       string     `header:"If-None-Match"`
+    RequestID         string     `header:"x-ms-client-request-id"`
+    ContentMD5        bool
+}
+
+// AppendBlock appends a block to an append blob.
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block +func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { + params := url.Values{"comp": {"appendblock"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.ContentMD5 { + md5sum := md5.Sum(chunk) + headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) + } + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeAppend) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go new file mode 100644 index 00000000000..76794c30518 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -0,0 +1,246 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "fmt" + "net/url" + "sort" + "strings" +) + +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services + +type authentication string + +const ( + sharedKey authentication = "sharedKey" + sharedKeyForTable authentication = "sharedKeyTable" + sharedKeyLite authentication = "sharedKeyLite" + sharedKeyLiteForTable authentication = "sharedKeyLiteTable" + + // headers + headerAcceptCharset = "Accept-Charset" + headerAuthorization = "Authorization" + headerContentLength = "Content-Length" + headerDate = "Date" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerDataServiceVersion = "DataServiceVersion" + headerMaxDataServiceVersion = "MaxDataServiceVersion" + headerContentTransferEncoding = "Content-Transfer-Encoding" +) + +func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { + if !c.sasClient { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader + } + return headers, nil +} + +func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { + canRes, err := c.buildCanonicalizedResource(url, auth, false) + if err != nil { + return "", err + } + + canString, err := buildCanonicalizedString(verb, headers, canRes, auth) + if err != nil { + return "", err + } + return c.createAuthorizationHeader(canString, auth), nil +} + +func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if c.accountName != StorageEmulatorAccountName || !sas { + cr.WriteString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if auth == sharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func (c *Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { + contentLength := headers[headerContentLength] + if contentLength == "0" { + contentLength = "" + } + date := headers[headerDate] + if v, ok := headers[headerXmsDate]; ok { + if auth == sharedKey || auth == sharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch auth { + case sharedKey: + canString = strings.Join([]string{ + verb, + headers[headerContentEncoding], + headers[headerContentLanguage], + contentLength, + headers[headerContentMD5], + headers[headerContentType], + date, + headers[headerIfModifiedSince], + headers[headerIfMatch], + headers[headerIfNoneMatch], + headers[headerIfUnmodifiedSince], + headers[headerRange], + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + canonicalizedResource, + }, "\n") + case sharedKeyLite: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("%s authentication is not supported yet", auth) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { + signature := c.computeHmac256(canonicalizedString) + var key string + switch auth { 
+	case sharedKey, sharedKeyForTable:
+		key = "SharedKey"
+	case sharedKeyLite, sharedKeyLiteForTable:
+		key = "SharedKeyLite"
+	}
+	return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
new file mode 100644
index 00000000000..a9d3cfccb68
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go
@@ -0,0 +1,652 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// A Blob is an entry in BlobListResponse.
+type Blob struct {
+	Container *Container
+	Name string `xml:"Name"`
+	Snapshot time.Time `xml:"Snapshot"`
+	Properties BlobProperties `xml:"Properties"`
+	Metadata BlobMetadata `xml:"Metadata"`
+}
+
+// PutBlobOptions includes the options for any put blob operation
+// (page, block, append)
+type PutBlobOptions struct {
+	Timeout uint
+	LeaseID string `header:"x-ms-lease-id"`
+	Origin string `header:"Origin"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// BlobMetadata is a set of custom name/value pairs.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
+type BlobMetadata map[string]string
+
+type blobMetadataEntries struct {
+	Entries []blobMetadataEntry `xml:",any"`
+}
+type blobMetadataEntry struct {
+	XMLName xml.Name
+	Value string `xml:",chardata"`
+}
+
+// UnmarshalXML converts the XML Metadata element into the Metadata map
+func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	var entries blobMetadataEntries
+	if err := d.DecodeElement(&entries, &start); err != nil {
+		return err
+	}
+	for _, entry := range entries.Entries {
+		if *bm == nil {
+			*bm = make(BlobMetadata)
+		}
+		(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
+	}
+	return nil
+}
+
+// MarshalXML implements the xml.Marshaler interface. It encodes
+// metadata name/value pairs as they would appear in an Azure
+// ListBlobs response.
+func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
+	entries := make([]blobMetadataEntry, 0, len(bm))
+	for k, v := range bm {
+		entries = append(entries, blobMetadataEntry{
+			XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
+			Value: v,
+		})
+	}
+	return enc.EncodeElement(blobMetadataEntries{
+		Entries: entries,
+	}, start)
+}
+
+// BlobProperties contains various properties of a blob
+// returned in various endpoints like ListBlobs or GetBlobProperties.
+type BlobProperties struct {
+	LastModified TimeRFC1123 `xml:"Last-Modified"`
+	Etag string `xml:"Etag"`
+	ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
+	ContentLength int64 `xml:"Content-Length"`
+	ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"`
+	ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
+	CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
+	ContentLanguage string `xml:"Content-Language" header:"x-ms-blob-content-language"`
+	ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
+	BlobType BlobType `xml:"BlobType"`
+	SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
+	CopyID string `xml:"CopyId"`
+	CopyStatus string `xml:"CopyStatus"`
+	CopySource string `xml:"CopySource"`
+	CopyProgress string `xml:"CopyProgress"`
+	CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"`
+	CopyStatusDescription string `xml:"CopyStatusDescription"`
+	LeaseStatus string `xml:"LeaseStatus"`
+	LeaseState string `xml:"LeaseState"`
+	LeaseDuration string `xml:"LeaseDuration"`
+	ServerEncrypted bool `xml:"ServerEncrypted"`
+	IncrementalCopy bool `xml:"IncrementalCopy"`
+}
+
+// BlobType defines the type of the Azure Blob.
+type BlobType string
+
+// Types of blobs
+const (
+	BlobTypeBlock BlobType = "BlockBlob"
+	BlobTypePage BlobType = "PageBlob"
+	BlobTypeAppend BlobType = "AppendBlob"
+)
+
+func (b *Blob) buildPath() string {
+	return b.Container.buildPath() + "/" + b.Name
+}
+
+// Exists returns true if a blob with given name exists on the specified
+// container of the storage account.
+func (b *Blob) Exists() (bool, error) {
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
+	headers := b.Container.bsc.client.getStandardHeaders()
+	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
+	if resp != nil {
+		defer readAndCloseBody(resp.body)
+		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+			return resp.statusCode == http.StatusOK, nil
+		}
+	}
+	return false, err
+}
+
+// GetURL gets the canonical URL to the blob with the specified name in the
+// specified container.
+// This method does not create a publicly accessible URL if the blob or container
+// is private and this method does not check if the blob exists.
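+//
+// Illustrative (editor's note): for account "myaccount", container "cont" and
+// blob "dir/file.txt" on the public cloud, the result has the form
+//   https://myaccount.blob.core.windows.net/cont/dir/file.txt
+// following the host layout built in getBaseURL (account.service.baseURL).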
+func (b *Blob) GetURL() string {
+	container := b.Container.Name
+	if container == "" {
+		container = "$root"
+	}
+	return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
+}
+
+// GetBlobRangeOptions includes the options for a get blob range operation
+type GetBlobRangeOptions struct {
+	Range *BlobRange
+	GetRangeContentMD5 bool
+	*GetBlobOptions
+}
+
+// GetBlobOptions includes the options for a get blob operation
+type GetBlobOptions struct {
+	Timeout uint
+	Snapshot *time.Time
+	LeaseID string `header:"x-ms-lease-id"`
+	Origin string `header:"Origin"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// BlobRange represents the byte range to be fetched
+type BlobRange struct {
+	Start uint64
+	End uint64
+}
+
+func (br BlobRange) String() string {
+	if br.End == 0 {
+		return fmt.Sprintf("bytes=%d-", br.Start)
+	}
+	return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
+}
+
+// Get returns a stream to read the blob. Caller must call both Read and Close()
+// to correctly close the underlying connection.
+//
+// See the GetRange method for use with a Range header.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
+func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
+	rangeOptions := GetBlobRangeOptions{
+		GetBlobOptions: options,
+	}
+	resp, err := b.getRange(&rangeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return nil, err
+	}
+	if err := b.writeProperties(resp.headers, true); err != nil {
+		return resp.body, err
+	}
+	return resp.body, nil
+}
+
+// GetRange reads the specified range of a blob to a stream. The bytesRange
+// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
+// Caller must call both Read and Close() to correctly close the underlying
+// connection.
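+//
+// Illustrative (editor's note), per the String method above:
+// BlobRange{Start: 0, End: 1023} renders as "bytes=0-1023" and
+// BlobRange{Start: 1024} renders as "bytes=1024-".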
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
+func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
+	resp, err := b.getRange(options)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
+		return nil, err
+	}
+	// Content-Length header should not be updated, as the service returns the range length
+	// (which is not always the full blob length)
+	if err := b.writeProperties(resp.headers, false); err != nil {
+		return resp.body, err
+	}
+	return resp.body, nil
+}
+
+func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.Range != nil {
+			headers["Range"] = options.Range.String()
+			if options.GetRangeContentMD5 {
+				headers["x-ms-range-get-content-md5"] = "true"
+			}
+		}
+		if options.GetBlobOptions != nil {
+			headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
+			params = addTimeout(params, options.Timeout)
+			params = addSnapshot(params, options.Snapshot)
+		}
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return nil, err
+	}
+	return resp, err
+}
+
+// SnapshotOptions includes the options for a snapshot blob operation
type SnapshotOptions struct {
+	Timeout uint
+	LeaseID string `header:"x-ms-lease-id"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// CreateSnapshot creates a snapshot for a blob
+// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
+func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
+	params := url.Values{"comp": {"snapshot"}}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil || resp == nil {
+		return nil, err
+	}
+	defer readAndCloseBody(resp.body)
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
+		return nil, err
+	}
+
+	snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
+	if snapshotResponse != "" {
+		snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
+		if err != nil {
+			return nil, err
+		}
+		return &snapshotTimestamp, nil
+	}
+
+	return nil, errors.New("Snapshot not created")
+}
+
+// GetBlobPropertiesOptions includes the options for a get blob properties operation
+type GetBlobPropertiesOptions struct {
+	Timeout uint
+	Snapshot *time.Time
+	LeaseID string `header:"x-ms-lease-id"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetProperties provides various information about the specified blob.
+// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
+func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		params = addSnapshot(params, options.Snapshot)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer readAndCloseBody(resp.body)
+
+	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return err
+	}
+	return b.writeProperties(resp.headers, true)
+}
+
+func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
+	var err error
+
+	contentLength := b.Properties.ContentLength
+	if includeContentLen {
+		contentLengthStr := h.Get("Content-Length")
+		if contentLengthStr != "" {
+			contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	var sequenceNum int64
+	sequenceNumStr := h.Get("x-ms-blob-sequence-number")
+	if sequenceNumStr != "" {
+		sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	lastModified, err := getTimeFromHeaders(h, "Last-Modified")
+	if err != nil {
+		return err
+	}
+
+	copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
+	if err != nil {
+		return err
+	}
+
+	b.Properties = BlobProperties{
+		LastModified: TimeRFC1123(*lastModified),
+		Etag: h.Get("Etag"),
+		ContentMD5: h.Get("Content-MD5"),
+		ContentLength: contentLength,
+		ContentEncoding: h.Get("Content-Encoding"),
+		ContentType: h.Get("Content-Type"),
+		ContentDisposition: h.Get("Content-Disposition"),
+		CacheControl: h.Get("Cache-Control"),
+		ContentLanguage: h.Get("Content-Language"),
+		SequenceNumber: sequenceNum,
+		CopyCompletionTime: TimeRFC1123(*copyCompletionTime),
+		CopyStatusDescription: h.Get("x-ms-copy-status-description"),
+		CopyID: h.Get("x-ms-copy-id"),
+		CopyProgress: h.Get("x-ms-copy-progress"),
+		CopySource: h.Get("x-ms-copy-source"),
+		CopyStatus: h.Get("x-ms-copy-status"),
+		BlobType: BlobType(h.Get("x-ms-blob-type")),
+		LeaseStatus: h.Get("x-ms-lease-status"),
+		LeaseState: h.Get("x-ms-lease-state"),
+	}
+	b.writeMetadata(h)
+	return nil
+}
+
+// SetBlobPropertiesOptions includes the options for a set blob properties
+// operation
+type SetBlobPropertiesOptions struct {
+	Timeout uint
+	LeaseID string `header:"x-ms-lease-id"`
+	Origin string `header:"Origin"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	SequenceNumberAction *SequenceNumberAction
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// SequenceNumberAction defines how the blob's sequence number should be modified
+type SequenceNumberAction string
+
+// Options for sequence number action
+const (
+	SequenceNumberActionMax SequenceNumberAction = "max"
+	SequenceNumberActionUpdate SequenceNumberAction = "update"
+	SequenceNumberActionIncrement SequenceNumberAction = "increment"
+)
+
+// SetProperties replaces the BlobHeaders for the specified blob.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetBlobProperties. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
+func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
+	params := url.Values{"comp": {"properties"}}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	if b.Properties.BlobType == BlobTypePage {
+		headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
+		if options != nil && options.SequenceNumberAction != nil {
+			headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
+			if *options.SequenceNumberAction != SequenceNumberActionIncrement {
+				headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
+			}
+		}
+	}
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	readAndCloseBody(resp.body)
+	return checkRespCode(resp.statusCode, []int{http.StatusOK})
+}
+
+// SetBlobMetadataOptions includes the options for a set blob metadata operation
+type SetBlobMetadataOptions struct {
+	Timeout uint
+	LeaseID string `header:"x-ms-lease-id"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// SetMetadata replaces the metadata for the specified blob.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetBlobMetadata. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
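+//
+// A minimal usage sketch (editor's note, illustrative; assumes a *Container
+// cnt and its GetBlobReference helper from container.go in this package):
+//   b := cnt.GetBlobReference("file.txt")
+//   b.Metadata = BlobMetadata{"env": "prod"}
+//   err := b.SetMetadata(nil)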
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	readAndCloseBody(resp.body)
+	return checkRespCode(resp.statusCode, []int{http.StatusOK})
+}
+
+// GetBlobMetadataOptions includes the options for a get blob metadata operation
+type GetBlobMetadataOptions struct {
+	Timeout uint
+	Snapshot *time.Time
+	LeaseID string `header:"x-ms-lease-id"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetMetadata returns all user-defined metadata for the specified blob.
+//
+// All metadata keys will be returned in lower case. (HTTP header
+// names are case-insensitive.)
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := b.Container.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		params = addSnapshot(params, options.Snapshot)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer readAndCloseBody(resp.body)
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	b.writeMetadata(resp.headers)
+	return nil
+}
+
+func (b *Blob) writeMetadata(h http.Header) {
+	metadata := make(map[string]string)
+	for k, v := range h {
+		// Can't trust CanonicalHeaderKey() to munge case
+		// reliably. "_" is allowed in identifiers:
+		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
+		// http://tools.ietf.org/html/rfc7230#section-3.2
+		// ...but "_" is considered invalid by
+		// CanonicalMIMEHeaderKey in
+		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
+		// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
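+		// Illustrative (editor's note): given response headers
+		//   X-Ms-Meta-Author: ["alice"]  and  x-ms-meta-build_id: ["42"]
+		// the statements below yield Metadata{"author": "alice", "build_id": "42"},
+		// keeping only the last value of each x-ms-meta-* header.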
+		k = strings.ToLower(k)
+		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
+			continue
+		}
+		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
+		k = k[len(userDefinedMetadataHeaderPrefix):]
+		metadata[k] = v[len(v)-1]
+	}
+
+	b.Metadata = BlobMetadata(metadata)
+}
+
+// DeleteBlobOptions includes the options for a delete blob operation
+type DeleteBlobOptions struct {
+	Timeout uint
+	Snapshot *time.Time
+	LeaseID string `header:"x-ms-lease-id"`
+	DeleteSnapshots *bool
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// Delete deletes the given blob from the specified container.
+// If the blob does not exist at the time of the Delete Blob operation, it
+// returns an error.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
+func (b *Blob) Delete(options *DeleteBlobOptions) error {
+	resp, err := b.delete(options)
+	if err != nil {
+		return err
+	}
+	readAndCloseBody(resp.body)
+	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
+
+// DeleteIfExists deletes the given blob from the specified container. If the
+// blob is deleted with this call, it returns true; otherwise it returns false.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
+func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
+	resp, err := b.delete(options)
+	if resp != nil {
+		defer readAndCloseBody(resp.body)
+		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+			return resp.statusCode == http.StatusAccepted, nil
+		}
+	}
+	return false, err
+}
+
+func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		params = addSnapshot(params, options.Snapshot)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+		if options.DeleteSnapshots != nil {
+			if *options.DeleteSnapshots {
+				headers["x-ms-delete-snapshots"] = "include"
+			} else {
+				headers["x-ms-delete-snapshots"] = "only"
+			}
+		}
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+	return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
+}
+
+// helper method to construct the path to either a blob or container
+func pathForResource(container, name string) string {
+	if name != "" {
+		return fmt.Sprintf("/%s/%s", container, name)
+	}
+	return fmt.Sprintf("/%s", container)
+}
+
+func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error {
+	readAndCloseBody(resp.body)
+	err := checkRespCode(resp.statusCode, []int{http.StatusCreated})
+	if err != nil {
+		return err
+	}
+	b.Properties.BlobType = bt
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
new file mode 100644
index 00000000000..e11af77441e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go
@@ -0,0 +1,170 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// OverrideHeaders defines overridable response headers in
+// a request using a SAS URI.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+type OverrideHeaders struct {
+	CacheControl string
+	ContentDisposition string
+	ContentEncoding string
+	ContentLanguage string
+	ContentType string
+}
+
+// BlobSASOptions are options to construct a blob SAS
+// URI.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+type BlobSASOptions struct {
+	BlobServiceSASPermissions
+	OverrideHeaders
+	SASOptions
+}
+
+// BlobServiceSASPermissions includes the available permissions for
+// blob service SAS URI.
+type BlobServiceSASPermissions struct {
+	Read bool
+	Add bool
+	Create bool
+	Write bool
+	Delete bool
+}
+
+func (p BlobServiceSASPermissions) buildString() string {
+	permissions := ""
+	if p.Read {
+		permissions += "r"
+	}
+	if p.Add {
+		permissions += "a"
+	}
+	if p.Create {
+		permissions += "c"
+	}
+	if p.Write {
+		permissions += "w"
+	}
+	if p.Delete {
+		permissions += "d"
+	}
+	return permissions
+}
+
+// GetSASURI creates a URL to the blob which contains the Shared
+// Access Signature with the specified options.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
+	uri := b.GetURL()
+	signedResource := "b"
+	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
+	if err != nil {
+		return "", err
+	}
+
+	permissions := options.BlobServiceSASPermissions.buildString()
+	return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
+}
+
+func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
+	start := ""
+	if options.Start != (time.Time{}) {
+		start = options.Start.UTC().Format(time.RFC3339)
+	}
+
+	expiry := options.Expiry.UTC().Format(time.RFC3339)
+
+	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
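+	// Illustrative (editor's note): a canonicalized resource such as
+	// "/blob/myaccount/cont/my+file.txt" must keep its literal '+' after the
+	// QueryUnescape call below; without the pre-escape it would decode to a
+	// space and the signature would not match.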
+	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
+	canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
+	if err != nil {
+		return "", err
+	}
+
+	protocols := ""
+	if options.UseHTTPS {
+		protocols = "https"
+	}
+	stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers)
+	if err != nil {
+		return "", err
+	}
+
+	sig := c.computeHmac256(stringToSign)
+	sasParams := url.Values{
+		"sv": {c.apiVersion},
+		"se": {expiry},
+		"sr": {signedResource},
+		"sp": {permissions},
+		"sig": {sig},
+	}
+
+	if c.apiVersion >= "2015-04-05" {
+		if protocols != "" {
+			sasParams.Add("spr", protocols)
+		}
+		if options.IP != "" {
+			sasParams.Add("sip", options.IP)
+		}
+	}
+
+	// Add override response headers
+	addQueryParameter(sasParams, "rscc", headers.CacheControl)
+	addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
+	addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
+	addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
+	addQueryParameter(sasParams, "rsct", headers.ContentType)
+
+	sasURL, err := url.Parse(uri)
+	if err != nil {
+		return "", err
+	}
+	sasURL.RawQuery = sasParams.Encode()
+	return sasURL.String(), nil
+}
+
+func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) {
+	rscc := headers.CacheControl
+	rscd := headers.ContentDisposition
+	rsce := headers.ContentEncoding
+	rscl := headers.ContentLanguage
+	rsct := headers.ContentType
+
+	if signedVersion >= "2015-02-21" {
+		canonicalizedResource = "/blob" + canonicalizedResource
+	}
+
+	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
+	if signedVersion >= "2015-04-05" {
+		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
+	}
+
+	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	if signedVersion >= "2013-08-15" {
+		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
+	}
+
+	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
new file mode 100644
index 00000000000..8fe21b0cfd9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
@@ -0,0 +1,126 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
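+
+// Editor's sketch (illustrative, not part of the vendored source): typical use
+// of this client, assuming NewBasicClient from client.go in this package and a
+// placeholder account key:
+//
+//   cli, err := NewBasicClient("myaccount", accountKey)
+//   if err != nil { /* handle */ }
+//   bsc := cli.GetBlobService()
+//   resp, err := bsc.ListContainers(ListContainersParameters{Prefix: "tf-"})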
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+// BlobStorageClient contains operations for Microsoft Azure Blob Storage
+// Service.
+type BlobStorageClient struct {
+	client Client
+	auth authentication
+}
+
+// GetServiceProperties gets the properties of your storage account's blob service.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
+func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
+	return b.client.getServiceProperties(blobServiceName, b.auth)
+}
+
+// SetServiceProperties sets the properties of your storage account's blob service.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
+func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
+	return b.client.setServiceProperties(props, blobServiceName, b.auth)
+}
+
+// ListContainersParameters defines the set of customizable parameters to make a
+// List Containers call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+type ListContainersParameters struct {
+	Prefix string
+	Marker string
+	Include string
+	MaxResults uint
+	Timeout uint
+}
+
+// GetContainerReference returns a Container object for the specified container name.
+func (b *BlobStorageClient) GetContainerReference(name string) *Container {
+	return &Container{
+		bsc: b,
+		Name: name,
+	}
+}
+
+// GetContainerReferenceFromSASURI returns a Container object for the specified
+// container SAS URI.
+func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
+	path := strings.Split(sasuri.Path, "/")
+	if len(path) <= 1 {
+		return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
+	}
+	cli := newSASClient().GetBlobService()
+	return &Container{
+		bsc: &cli,
+		Name: path[1],
+		sasuri: sasuri,
+	}, nil
+}
+
+// ListContainers returns the list of containers in a storage account along with
+// pagination token and other response details.
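+//
+// Illustrative (editor's note): to page through all containers, pass the
+// response's marker back as params.Marker and repeat until it is empty
+// (assuming ContainerListResponse exposes a NextMarker field, as the paged
+// list responses elsewhere in this SDK do).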
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
+	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
+	uri := b.client.getEndpoint(blobServiceName, "", q)
+	headers := b.client.getStandardHeaders()
+
+	var out ContainerListResponse
+	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.body.Close()
+	err = xmlUnmarshal(resp.body, &out)
+	if err != nil {
+		return nil, err
+	}
+
+	// assign our client to the newly created Container objects
+	for i := range out.Containers {
+		out.Containers[i].bsc = &b
+	}
+	return &out, err
+}
+
+func (p ListContainersParameters) getParameters() url.Values {
+	out := url.Values{}
+
+	if p.Prefix != "" {
+		out.Set("prefix", p.Prefix)
+	}
+	if p.Marker != "" {
+		out.Set("marker", p.Marker)
+	}
+	if p.Include != "" {
+		out.Set("include", p.Include)
+	}
+	if p.MaxResults != 0 {
+		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
+	}
+	if p.Timeout != 0 {
+		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
+	}
+
+	return out
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
new file mode 100644
index 00000000000..e0176d664ad
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go
@@ -0,0 +1,270 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// BlockListType is used to filter out types of blocks in a Get Blocks List call
+// for a block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
+// block types.
+type BlockListType string
+
+// Filters for listing blocks in block blobs
+const (
+	BlockListTypeAll BlockListType = "all"
+	BlockListTypeCommitted BlockListType = "committed"
+	BlockListTypeUncommitted BlockListType = "uncommitted"
+)
+
+// Maximum sizes (per REST API) for various concepts
+const (
+	MaxBlobBlockSize = 100 * 1024 * 1024
+	MaxBlobPageSize = 4 * 1024 * 1024
+)
+
+// BlockStatus defines states a block for a block blob can
+// be in.
+type BlockStatus string
+
+// List of statuses that can be used to refer to a block in a block list
+const (
+	BlockStatusUncommitted BlockStatus = "Uncommitted"
+	BlockStatusCommitted BlockStatus = "Committed"
+	BlockStatusLatest BlockStatus = "Latest"
+)
+
+// Block is used to create Block entities for Put Block List
+// call.
+type Block struct {
+	ID string
+	Status BlockStatus
+}
+
+// BlockListResponse contains the response fields from Get Block List call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+type BlockListResponse struct {
+	XMLName xml.Name `xml:"BlockList"`
+	CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
+	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
+}
+
+// BlockResponse contains the block information returned
+// in the GetBlockListCall.
+type BlockResponse struct {
+	Name string `xml:"Name"`
+	Size int64 `xml:"Size"`
+}
+
+// CreateBlockBlob initializes an empty block blob with no blocks.
+//
+// See CreateBlockBlobFromReader for more info on creating blobs.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
+func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
+	return b.CreateBlockBlobFromReader(nil, options)
+}
+
+// CreateBlockBlobFromReader initializes a block blob using data from
+// reader; the content length is computed from the reader. To create an
+// empty blob, pass nil for reader.
+//
+// Any headers set in blob.Properties or metadata in blob.Metadata
+// will be set on the blob.
+//
+// The API rejects requests with size > 256 MiB (but this limit is not
+// checked by the SDK). To write a larger blob, use CreateBlockBlob,
+// PutBlock, and PutBlockList.
+//
+// To create a blob from scratch, call container.GetBlobReference() to
+// get an empty blob, fill in blob.Properties and blob.Metadata as
+// appropriate then call this method.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
+func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["x-ms-blob-type"] = string(BlobTypeBlock)
+
+	headers["Content-Length"] = "0"
+	var n int64
+	var err error
+	if blob != nil {
+		type lener interface {
+			Len() int
+		}
+		// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
+		if l, ok := blob.(lener); ok {
+			n = int64(l.Len())
+		} else {
+			var buf bytes.Buffer
+			n, err = io.Copy(&buf, blob)
+			if err != nil {
+				return err
+			}
+			blob = &buf
+		}
+
+		headers["Content-Length"] = strconv.FormatInt(n, 10)
+	}
+	b.Properties.ContentLength = n
+
+	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
+	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	return b.respondCreation(resp, BlobTypeBlock)
+}
+
+// PutBlockOptions includes the options for a put block operation
+type PutBlockOptions struct {
+	Timeout uint
+	LeaseID string `header:"x-ms-lease-id"`
+	ContentMD5 string `header:"Content-MD5"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// PutBlock saves the given data chunk to the specified block blob with
+// given ID.
+//
+// The API rejects chunks larger than 100 MiB (but this limit is not
+// checked by the SDK).
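+//
+// Editor's sketch (illustrative) of the block upload protocol, using names
+// from this file; base64 block IDs must all have the same length, and chunks
+// is an assumed [][]byte of at most MaxBlobBlockSize each:
+//
+//   blocks := []Block{}
+//   for i, chunk := range chunks {
+//       id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%05d", i)))
+//       if err := b.PutBlock(id, chunk, nil); err != nil { /* handle */ }
+//       blocks = append(blocks, Block{ID: id, Status: BlockStatusLatest})
+//   }
+//   err := b.PutBlockList(blocks, nil) // commits the staged blocks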
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
+func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
+	return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
+}
+
+// PutBlockWithLength saves the given data stream of exactly specified size to
+// the block blob with given ID. It is an alternative to PutBlock where the
+// data comes as a stream but the length is known in advance.
+//
+// The API rejects requests with size > 100 MiB (but this limit is not
+// checked by the SDK).
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
+func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
+	query := url.Values{
+		"comp": {"block"},
+		"blockid": {blockID},
+	}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["Content-Length"] = fmt.Sprintf("%v", size)
+
+	if options != nil {
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	return b.respondCreation(resp, BlobTypeBlock)
+}
+
+// PutBlockListOptions includes the options for a put block list operation
+type PutBlockListOptions struct {
+	Timeout uint
+	LeaseID string `header:"x-ms-lease-id"`
+	IfModifiedSince *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch string `header:"If-Match"`
+	IfNoneMatch string `header:"If-None-Match"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// PutBlockList saves list of blocks to the specified block blob.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
+func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
+	params := url.Values{"comp": {"blocklist"}}
+	blockListXML := prepareBlockListRequest(blocks)
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
+	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
+	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	readAndCloseBody(resp.body)
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// GetBlockListOptions includes the options for a get block list operation
+type GetBlockListOptions struct {
+	Timeout uint
+	Snapshot *time.Time
+	LeaseID string `header:"x-ms-lease-id"`
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetBlockList retrieves the list of blocks in the specified block blob.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
+func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
+	params := url.Values{
+		"comp": {"blocklist"},
+		"blocklisttype": {string(blockType)},
+	}
+	headers := b.Container.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		params = addSnapshot(params, options.Snapshot)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	var out BlockListResponse
+	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return out, err
+	}
+	defer resp.body.Close()
+
+	err = xmlUnmarshal(resp.body, &out)
+	return out, err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
new file mode 100644
index 00000000000..8f6cd95da71
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go
@@ -0,0 +1,878 @@
+// Package storage provides clients for Microsoft Azure Storage Services.
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+)
+
+const (
+	// DefaultBaseURL is the domain name used for storage requests in the
+	// public cloud when a default client is created.
+	DefaultBaseURL = "core.windows.net"
+
+	// DefaultAPIVersion is the Azure Storage API version string used when a
+	// basic client is created.
+	DefaultAPIVersion = "2016-05-31"
+
+	defaultUseHTTPS = true
+	defaultRetryAttempts = 5
+	defaultRetryDuration = time.Second * 5
+
+	// StorageEmulatorAccountName is the fixed storage account name used by the Azure Storage Emulator
+	StorageEmulatorAccountName = "devstoreaccount1"
+
+	// StorageEmulatorAccountKey is the fixed storage account key used by the Azure Storage Emulator
+	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
+
+	blobServiceName = "blob"
+	tableServiceName = "table"
+	queueServiceName = "queue"
+	fileServiceName = "file"
+
+	storageEmulatorBlob = "127.0.0.1:10000"
+	storageEmulatorTable = "127.0.0.1:10002"
+	storageEmulatorQueue = "127.0.0.1:10001"
+
+	userAgentHeader = "User-Agent"
+
+	userDefinedMetadataHeaderPrefix = "x-ms-meta-"
+)
+
+var (
+	validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
+	defaultValidStatusCodes = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+// Sender sends a request
+type Sender interface {
+	Send(*Client, *http.Request) (*http.Response, error)
+}
+
+// DefaultSender is the default sender for the client. It implements
+// an automatic retry strategy.
+type DefaultSender struct {
+	RetryAttempts int
+	RetryDuration time.Duration
+	ValidStatusCodes []int
+	attempts int // used for testing
+}
+
+// Send is the default retry strategy in the client
+func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
+	rr := autorest.NewRetriableRequest(req)
+	for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
+		err = rr.Prepare()
+		if err != nil {
+			return resp, err
+		}
+		resp, err = c.HTTPClient.Do(rr.Request())
+		if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
+			return resp, err
+		}
+		autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
+		ds.attempts = attempts
+	}
+	ds.attempts++
+	return resp, err
+}
+
+// Client is the object that needs to be constructed to perform
+// operations on the storage account.
+type Client struct {
+	// HTTPClient is the http.Client used to initiate API
+	// requests. http.DefaultClient is used when creating a
+	// client.
+	HTTPClient *http.Client
+
+	// Sender is an interface that sends the request. Clients are
+	// created with a DefaultSender. The DefaultSender has an
+	// automatic retry strategy built in. The Sender can be customized.
+	Sender Sender
+
+	accountName string
+	accountKey []byte
+	useHTTPS bool
+	UseSharedKeyLite bool
+	baseURL string
+	apiVersion string
+	userAgent string
+	sasClient bool
+	accountSASToken url.Values
+}
+
+type storageResponse struct {
+	statusCode int
+	headers http.Header
+	body io.ReadCloser
+}
+
+type odataResponse struct {
+	storageResponse
+	odata odataErrorWrapper
+}
+
+// AzureStorageServiceError contains fields of the error response from
+// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
+// Some fields might be specific to certain calls.
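+//
+// Illustrative (editor's note): a typical XML error body that unmarshals into
+// this struct looks like
+//   <Error><Code>ContainerNotFound</Code><Message>The specified container does not exist.</Message></Error>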
+type AzureStorageServiceError struct {
+	Code string `xml:"Code"`
+	Message string `xml:"Message"`
+	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
+	QueryParameterName string `xml:"QueryParameterName"`
+	QueryParameterValue string `xml:"QueryParameterValue"`
+	Reason string `xml:"Reason"`
+	Lang string
+	StatusCode int
+	RequestID string
+	Date string
+	APIVersion string
+}
+
+type odataErrorMessage struct {
+	Lang string `json:"lang"`
+	Value string `json:"value"`
+}
+
+type odataError struct {
+	Code string `json:"code"`
+	Message odataErrorMessage `json:"message"`
+}
+
+type odataErrorWrapper struct {
+	Err odataError `json:"odata.error"`
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor with an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int
+	got int
+}
+
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by Azure.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// NewBasicClient constructs a Client with given storage service name and
+// key.
+func NewBasicClient(accountName, accountKey string) (Client, error) {
+	if accountName == StorageEmulatorAccountName {
+		return NewEmulatorClient()
+	}
+	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
+}
+
+// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
+// key in the referenced cloud.
+func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
+	if accountName == StorageEmulatorAccountName {
+		return NewEmulatorClient()
+	}
+	return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
+}
+
+// NewEmulatorClient constructs a Client intended to only work with the Azure
+// Storage Emulator
+func NewEmulatorClient() (Client, error) {
+	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
+}
+
+// NewClient constructs a Client. This should be used if the caller wants
+// to specify whether to use HTTPS, a specific REST API version or a custom
+// storage endpoint other than the Azure Public Cloud.
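+//
+// A minimal sketch (editor's note; key is a placeholder base64 account key):
+//   cli, err := NewClient("myaccount", key, DefaultBaseURL, DefaultAPIVersion, true)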
+func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+	var c Client
+	if !IsValidStorageAccount(accountName) {
+		return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
+	} else if accountKey == "" {
+		return c, fmt.Errorf("azure: account key required")
+	} else if serviceBaseURL == "" {
+		return c, fmt.Errorf("azure: base storage service url required")
+	}
+
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
+	}
+
+	c = Client{
+		HTTPClient: http.DefaultClient,
+		accountName: accountName,
+		accountKey: key,
+		useHTTPS: useHTTPS,
+		baseURL: serviceBaseURL,
+		apiVersion: apiVersion,
+		sasClient: false,
+		UseSharedKeyLite: false,
+		Sender: &DefaultSender{
+			RetryAttempts: defaultRetryAttempts,
+			ValidStatusCodes: defaultValidStatusCodes,
+			RetryDuration: defaultRetryDuration,
+		},
+	}
+	c.userAgent = c.getDefaultUserAgent()
+	return c, nil
+}
+
+// IsValidStorageAccount checks if the storage account name is valid.
+// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
+func IsValidStorageAccount(account string) bool {
+	return validStorageAccount.MatchString(account)
+}
+
+// NewAccountSASClient constructs a client that uses accountSAS authorization
+// for its operations.
+func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
+	c := newSASClient()
+	c.accountSASToken = token
+	c.accountName = account
+	c.baseURL = env.StorageEndpointSuffix
+
+	// Get API version and protocol from token
+	c.apiVersion = token.Get("sv")
+	c.useHTTPS = token.Get("spr") == "https"
+	return c
+}
+
+func newSASClient() Client {
+	c := Client{
+		HTTPClient: http.DefaultClient,
+		apiVersion: DefaultAPIVersion,
+		sasClient: true,
+		Sender: &DefaultSender{
+			RetryAttempts: defaultRetryAttempts,
+			ValidStatusCodes: defaultValidStatusCodes,
+			RetryDuration: defaultRetryDuration,
+		},
+	}
+	c.userAgent = c.getDefaultUserAgent()
+	return c
+}
+
+func (c Client) isServiceSASClient() bool {
+	return c.sasClient && c.accountSASToken == nil
+}
+
+func (c Client) isAccountSASClient() bool {
+	return c.sasClient && c.accountSASToken != nil
+}
+
+func (c Client) getDefaultUserAgent() string {
+	return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
+		runtime.Version(),
+		runtime.GOARCH,
+		runtime.GOOS,
+		sdkVersion,
+		c.apiVersion,
+	)
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
+}
+
+// protectUserAgent is used in funcs that include extraheaders as a parameter.
+// It prevents the User-Agent header from being overwritten; if it happens to
+// be present, it gets appended to the current User-Agent.
+// Use it before getStandardHeaders.
+func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
+	if v, ok := extraheaders[userAgentHeader]; ok {
+		c.AddToUserAgent(v)
+		delete(extraheaders, userAgentHeader)
+	}
+	return extraheaders
+}
+
+func (c Client) getBaseURL(service string) *url.URL {
+	scheme := "http"
+	if c.useHTTPS {
+		scheme = "https"
+	}
+	host := ""
+	if c.accountName == StorageEmulatorAccountName {
+		switch service {
+		case blobServiceName:
+			host = storageEmulatorBlob
+		case tableServiceName:
+			host = storageEmulatorTable
+		case queueServiceName:
+			host = storageEmulatorQueue
+		}
+	} else {
+		host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
+	}
+
+	return &url.URL{
+		Scheme: scheme,
+		Host: host,
+	}
+}
+
+func (c Client) getEndpoint(service, path string, params url.Values) string {
+	u := c.getBaseURL(service)
+
+	// API doesn't accept path segments not starting with '/'
+	if !strings.HasPrefix(path, "/") {
+		path = fmt.Sprintf("/%v", path)
+	}
+
+	if c.accountName == StorageEmulatorAccountName {
+		path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
+	}
+
+	u.Path = path
+	u.RawQuery = params.Encode()
+	return u.String()
+}
+
+// AccountSASTokenOptions includes options for constructing
+// an account SAS token.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+type AccountSASTokenOptions struct {
+	APIVersion string
+	Services Services
+	ResourceTypes ResourceTypes
+	Permissions Permissions
+	Start time.Time
+	Expiry time.Time
+	IP string
+	UseHTTPS bool
+}
+
+// Services specify services accessible with an account SAS.
+type Services struct {
+	Blob bool
+	Queue bool
+	Table bool
+	File bool
+}
+
+// ResourceTypes specify the resources accessible with an
+// account SAS.
+type ResourceTypes struct {
+	Service bool
+	Container bool
+	Object bool
+}
+
+// Permissions specifies permissions for an accountSAS.
+type Permissions struct {
+	Read bool
+	Write bool
+	Delete bool
+	List bool
+	Add bool
+	Create bool
+	Update bool
+	Process bool
+}
+
+// GetAccountSASToken creates an account SAS token
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
+	if options.APIVersion == "" {
+		options.APIVersion = c.apiVersion
+	}
+
+	if options.APIVersion < "2015-04-05" {
+		return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
+	}
+
+	// build services string
+	services := ""
+	if options.Services.Blob {
+		services += "b"
+	}
+	if options.Services.Queue {
+		services += "q"
+	}
+	if options.Services.Table {
+		services += "t"
+	}
+	if options.Services.File {
+		services += "f"
+	}
+
+	// build resources string
+	resources := ""
+	if options.ResourceTypes.Service {
+		resources += "s"
+	}
+	if options.ResourceTypes.Container {
+		resources += "c"
+	}
+	if options.ResourceTypes.Object {
+		resources += "o"
+	}
+
+	// build permissions string
+	permissions := ""
+	if options.Permissions.Read {
+		permissions += "r"
+	}
+	if options.Permissions.Write {
+		permissions += "w"
+	}
+	if options.Permissions.Delete {
+		permissions += "d"
+	}
+	if options.Permissions.List {
+		permissions += "l"
+	}
+	if options.Permissions.Add {
+		permissions += "a"
+	}
+	if options.Permissions.Create {
+		permissions += "c"
+	}
+	if options.Permissions.Update {
+		permissions += "u"
+	}
+	if options.Permissions.Process {
+		permissions += "p"
+	}
+
+	// build start time, if exists
+	start := ""
+	if options.Start != (time.Time{}) {
+		start = options.Start.Format(time.RFC3339)
+		// For some reason I don't understand, it fails when the rest of the string is included
+		start = start[:10]
+	}
+
+	// build expiry time
+	expiry := options.Expiry.Format(time.RFC3339)
+	// For some reason I don't understand, it fails when the rest of the string is included
+	expiry = expiry[:10]
+
+	protocol := "https,http"
+	if options.UseHTTPS {
+		protocol = "https"
+	}
+
+	stringToSign := strings.Join([]string{
+		c.accountName,
+		permissions,
+		services,
+		resources,
+		start,
+		expiry,
+		options.IP,
+		protocol,
+		options.APIVersion,
+		"",
+	}, "\n")
+	signature := c.computeHmac256(stringToSign)
+
+	sasParams := url.Values{
+		"sv": {options.APIVersion},
+		"ss": {services},
+		"srt": {resources},
+		"sp": {permissions},
+		"se": {expiry},
+		"spr": {protocol},
+		"sig": {signature},
+	}
+	if start != "" {
+		sasParams.Add("st", start)
+	}
+	if options.IP != "" {
+		sasParams.Add("sip", options.IP)
+	}
+
+	return sasParams, nil
+}
+
+// GetBlobService returns a BlobStorageClient which can operate on the blob
+// service of the storage account.
+func (c Client) GetBlobService() BlobStorageClient {
+	b := BlobStorageClient{
+		client: c,
+	}
+	b.client.AddToUserAgent(blobServiceName)
+	b.auth = sharedKey
+	if c.UseSharedKeyLite {
+		b.auth = sharedKeyLite
+	}
+	return b
+}
+
+// GetQueueService returns a QueueServiceClient which can operate on the queue
+// service of the storage account.
+func (c Client) GetQueueService() QueueServiceClient {
+	q := QueueServiceClient{
+		client: c,
+	}
+	q.client.AddToUserAgent(queueServiceName)
+	q.auth = sharedKey
+	if c.UseSharedKeyLite {
+		q.auth = sharedKeyLite
+	}
+	return q
+}
+
+// GetTableService returns a TableServiceClient which can operate on the table
+// service of the storage account.
+func (c Client) GetTableService() TableServiceClient {
+	t := TableServiceClient{
+		client: c,
+	}
+	t.client.AddToUserAgent(tableServiceName)
+	t.auth = sharedKeyForTable
+	if c.UseSharedKeyLite {
+		t.auth = sharedKeyLiteForTable
+	}
+	return t
+}
+
+// GetFileService returns a FileServiceClient which can operate on the file
+// service of the storage account.
+func (c Client) GetFileService() FileServiceClient { + f := FileServiceClient{ + client: c, + } + f.client.AddToUserAgent(fileServiceName) + f.auth = sharedKey + if c.UseSharedKeyLite { + f.auth = sharedKeyLite + } + return f +} + +func (c Client) getStandardHeaders() map[string]string { + return map[string]string{ + userAgentHeader: c.userAgent, + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), + } +} + +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + // if a body was provided ensure that the content length was set. + // http.NewRequest() will automatically do this for a handful of types + // and for those that it doesn't we will handle here. + if body != nil && req.ContentLength < 1 { + if lr, ok := body.(*io.LimitedReader); ok { + setContentLengthFromLimitedReader(req, lr) + } + } + + for k, v := range headers { + req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645 + } + + resp, err := c.Sender.Send(&c, req) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 && resp.StatusCode <= 505 { + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, err + } + + requestID, date, version := getDebugHeaders(resp.Header) + if len(respBody) == 0 { + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + } else { + storageErr := AzureStorageServiceError{ + StatusCode: resp.StatusCode, + RequestID: requestID, + Date: date, + APIVersion: version, + } + // response contains storage service error object, unmarshal + if resp.Header.Get("Content-Type") == "application/xml" { + errIn := serviceErrFromXML(respBody, &storageErr) + if errIn != nil { // error unmarshaling the error response + err = errIn + } + } else { + errIn := serviceErrFromJSON(respBody, &storageErr) + if errIn != nil { // error unmarshaling the error response + err = errIn + } + } + if err == nil { + err = storageErr + } + } + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ + }, err + } + + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: resp.Body}, nil +} + +func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) + if err != nil { + return nil, nil, nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, nil, nil, err + } + for k, v := range headers { + req.Header.Add(k, v) + } + + resp, err := c.Sender.Send(&c, req) + if err != nil { + return nil, nil, nil, err + } + + respToRet := &odataResponse{} + respToRet.body = resp.Body + respToRet.statusCode = resp.StatusCode + respToRet.headers = resp.Header + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, nil, nil, err + } 
+ + requestID, date, version := getDebugHeaders(resp.Header) + if len(respBody) == 0 { + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + return respToRet, req, resp, err + } + // try unmarshal as odata.error json + err = json.Unmarshal(respBody, &respToRet.odata) + } + + return respToRet, req, resp, err +} + +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + return respToRet, err +} + +func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + // execute common query, get back generated request, response etc... for more processing. + respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + if err != nil { + return nil, err + } + + // return the OData in the case of executing batch commands. + // In this case we need to read the outer batch boundary and contents. + // Then we read the changeset information within the batch + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, err + } + + // outer multipart body + _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) + if err != nil { + return nil, err + } + + // batch details. + batchBoundary := batchHeader["boundary"] + batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) + if err != nil { + return nil, err + } + + // changeset details. + err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) + if err != nil { + return nil, err + } + + return respToRet, nil +} + +func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { + changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) + changesetPart, err := changesetMultiReader.NextPart() + if err != nil { + return err + } + + changesetPartBufioReader := bufio.NewReader(changesetPart) + changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) + if err != nil { + return err + } + + if changesetResp.StatusCode != http.StatusNoContent { + changesetBody, err := readAndCloseBody(changesetResp.Body) + err = json.Unmarshal(changesetBody, &respToRet.odata) + if err != nil { + return err + } + respToRet.statusCode = changesetResp.StatusCode + } + + return nil +} + +func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) { + respBodyString := string(respBody) + respBodyReader := strings.NewReader(respBodyString) + + // reading batchresponse + batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary) + batchPart, err := batchMultiReader.NextPart() + if err != nil { + return nil, "", err + } + batchPartBufioReader := bufio.NewReader(batchPart) + + _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type")) + if err != nil { + return nil, "", err + } + changesetBoundary := changesetHeader["boundary"] + return batchPartBufioReader, changesetBoundary, nil +} + +func readAndCloseBody(body io.ReadCloser) ([]byte, error) { + defer body.Close() + out, err := ioutil.ReadAll(body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error { + if err := 
xml.Unmarshal(body, storageErr); err != nil { + storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body)) + return err + } + return nil +} + +func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error { + odataError := odataErrorWrapper{} + if err := json.Unmarshal(body, &odataError); err != nil { + storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body)) + return err + } + storageErr.Code = odataError.Err.Code + storageErr.Message = odataError.Err.Message.Value + storageErr.Lang = odataError.Err.Message.Lang + return nil +} + +func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { + return AzureStorageServiceError{ + StatusCode: code, + Code: status, + RequestID: requestID, + Date: date, + APIVersion: version, + Message: "no response body was available for error status code", + } +} + +func (e AzureStorageServiceError) Error() string { + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) +} + +// checkRespCode returns UnexpectedStatusCodeError if the given response code is not +// one of the allowed status codes; otherwise nil. +func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} + +func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { + metadata = c.protectUserAgent(metadata) + for k, v := range metadata { + h[userDefinedMetadataHeaderPrefix+k] = v + } + return h +} + +func getDebugHeaders(h http.Header) (requestID, date, version string) { + requestID = h.Get("x-ms-request-id") + version = h.Get("x-ms-version") + date = h.Get("Date") + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go new file mode 100644 index 00000000000..e898e9bfaf9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go @@ -0,0 +1,38 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/url" + "time" +) + +// SASOptions includes options used by SAS URIs for different +// services and resources. 
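A brief sketch of how callers can inspect the typed errors produced by `exec` above; for 4xx/5xx responses the error is an `AzureStorageServiceError` value, so a plain type assertion recovers the debug headers.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// describeStorageError prints the service-side details when err came
// from exec above; all fields shown are populated by getDebugHeaders
// and the error-body unmarshalling in this file.
func describeStorageError(err error) {
	if serr, ok := err.(storage.AzureStorageServiceError); ok {
		fmt.Printf("HTTP %d, code %q, request %s, api %s\n",
			serr.StatusCode, serr.Code, serr.RequestID, serr.APIVersion)
		return
	}
	fmt.Println("not a storage service error:", err)
}
```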
+type SASOptions struct { + APIVersion string + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool + Identifier string +} + +func addQueryParameter(query url.Values, key, value string) url.Values { + if value != "" { + query.Add(key, value) + } + return query +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go new file mode 100644 index 00000000000..8963c7a89b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -0,0 +1,545 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// Container represents an Azure container. +type Container struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata map[string]string + sasuri url.URL +} + +// Client returns the HTTP client used by the Container reference. +func (c *Container) Client() *Client { + return &c.bsc.client +} + +func (c *Container) buildPath() string { + return fmt.Sprintf("/%s", c.Name) +} + +// GetURL gets the canonical URL to the container. +// This method does not create a publicly accessible URL if the container +// is private and this method does not check if the blob exists. +func (c *Container) GetURL() string { + container := c.Name + if container == "" { + container = "$root" + } + return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) +} + +// ContainerSASOptions are options to construct a container SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type ContainerSASOptions struct { + ContainerSASPermissions + OverrideHeaders + SASOptions +} + +// ContainerSASPermissions includes the available permissions for +// a container SAS URI. +type ContainerSASPermissions struct { + BlobServiceSASPermissions + List bool +} + +// GetSASURI creates an URL to the container which contains the Shared +// Access Signature with the specified options. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { + uri := c.GetURL() + signedResource := "c" + canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) + if err != nil { + return "", err + } + + // build permissions string + permissions := options.BlobServiceSASPermissions.buildString() + if options.List { + permissions += "l" + } + + return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. 
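A hedged usage sketch for `Container.GetSASURI` above. The `Read` flag is assumed to be a field of the embedded `BlobServiceSASPermissions` struct, which this diff defines elsewhere; `Expiry` and `UseHTTPS` come from the embedded `SASOptions`.

```go
package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// containerReadSAS returns a read+list SAS URL for the container.
func containerReadSAS(cnt *storage.Container) (string, error) {
	opts := storage.ContainerSASOptions{}
	opts.Read = true                              // assumed BlobServiceSASPermissions field
	opts.List = true                              // appends "l", as GetSASURI does above
	opts.Expiry = time.Now().UTC().Add(time.Hour) // from the embedded SASOptions
	opts.UseHTTPS = true
	return cnt.GetSASURI(opts)
}
```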
+type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + Delimiter string `xml:"Delimiter"` +} + +// IncludeBlobDataset has options to include in a list blobs operation +type IncludeBlobDataset struct { + Snapshots bool + Metadata bool + UncommittedBlobs bool + Copy bool +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include *IncludeBlobDataset + MaxResults uint + Timeout uint + RequestID string +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != nil { + include := []string{} + include = addString(include, p.Include.Snapshots, "snapshots") + include = addString(include, p.Include.Metadata, "metadata") + include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") + include = addString(include, p.Include.Copy, "copy") + fullInclude := strings.Join(include, ",") + out.Set("include", fullInclude) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func addString(datasets []string, include bool, text string) []string { + if include { + datasets = append(datasets, text) + } + return datasets +} + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. 
+type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// ContainerAccessPolicy represents each access policy in the container ACL. +type ContainerAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanWrite bool + CanDelete bool +} + +// ContainerPermissions represents the container ACLs. +type ContainerPermissions struct { + AccessType ContainerAccessType + AccessPolicies []ContainerAccessPolicy +} + +// ContainerAccessHeader references header used when setting/getting container ACL +const ( + ContainerAccessHeader string = "x-ms-blob-public-access" +) + +// GetBlobReference returns a Blob object for the specified blob name. +func (c *Container) GetBlobReference(name string) *Blob { + return &Blob{ + Container: c, + Name: name, + } +} + +// CreateContainerOptions includes the options for a create container operation +type CreateContainerOptions struct { + Timeout uint + Access ContainerAccessType `header:"x-ms-blob-public-access"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Create creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container +func (c *Container) Create(options *CreateContainerOptions) error { + resp, err := c.create(options) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. +func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { + resp, err := c.create(options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) +} + +// Exists returns true if a container with given name exists +// on the storage account, otherwise returns false. 
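A short sketch combining `CreateIfNotExists` with the access types above; `GetContainerReference` is assumed (from the blob service client earlier in this diff) to return a `*Container`.

```go
package main

import "github.com/Azure/azure-sdk-for-go/storage"

// ensureContainer creates the container on first use.
func ensureContainer(client storage.Client) (*storage.Container, error) {
	bsc := client.GetBlobService()
	cnt := bsc.GetContainerReference("assets")
	// ContainerAccessTypeBlob sends "x-ms-blob-public-access: blob",
	// allowing anonymous reads of blobs but not container listing.
	_, err := cnt.CreateIfNotExists(&storage.CreateContainerOptions{
		Access: storage.ContainerAccessTypeBlob,
	})
	if err != nil {
		return nil, err
	}
	return cnt, nil
}
```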
+func (c *Container) Exists() (bool, error) { + q := url.Values{"restype": {"container"}} + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + + } else { + if c.bsc.client.isAccountSASClient() { + q = mergeParams(q, c.bsc.client.accountSASToken) + } + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } + headers := c.bsc.client.getStandardHeaders() + + resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetContainerPermissionOptions includes options for a set container permissions operation +type SetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetPermissions sets up container permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL +func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { + body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return errors.New("Unable to set permissions") + } + + return nil +} + +// GetContainerPermissionOptions includes options for a get container permissions operation +type GetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx +// If timeout is 0 then it will not be passed to Azure +// leaseID will only be passed to Azure if populated +func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildAccessPolicy(ap, 
&resp.headers), nil +} + +func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { + // containerAccess. Blob, Container, empty + containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) + permissions := ContainerPermissions{ + AccessType: ContainerAccessType(containerAccess), + AccessPolicies: []ContainerAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + capd := ContainerAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") + capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + permissions.AccessPolicies = append(permissions.AccessPolicies, capd) + } + return &permissions +} + +// DeleteContainerOptions includes options for a delete container operation +type DeleteContainerOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Delete deletes the container with given name on the storage +// account. If the container does not exist returns error. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) Delete(options *DeleteContainerOptions) error { + resp, err := c.delete(options) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { + resp, err := c.delete(options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs +func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}, + }) + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + } else { + if c.bsc.client.isAccountSASClient() { + q = mergeParams(q, c.bsc.client.accountSASToken) + } + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } + + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) + + var out BlobListResponse + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + for i := range out.Blobs { + out.Blobs[i].Container = c + } + return out, err +} + +func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, capd := range policies { + permission := capd.generateContainerPermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { + // generate the permissions string (rwd). + // still want the end user API to have bool flags. + permissions = "" + + if capd.CanRead { + permissions += "r" + } + + if capd.CanWrite { + permissions += "w" + } + + if capd.CanDelete { + permissions += "d" + } + + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go new file mode 100644 index 00000000000..a4cc2527b67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -0,0 +1,237 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// CopyOptions includes the options for a copy blob operation +type CopyOptions struct { + Timeout uint + Source CopyOptionsConditions + Destiny CopyOptionsConditions + RequestID string +} + +// IncrementalCopyOptions includes the options for an incremental copy blob operation +type IncrementalCopyOptions struct { + Timeout uint + Destination IncrementalCopyOptionsConditions + RequestID string +} + +// CopyOptionsConditions includes some conditional options in a copy blob operation +type CopyOptionsConditions struct { + LeaseID string + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation +type IncrementalCopyOptionsConditions struct { + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// Copy starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { + copyID, err := b.StartCopy(sourceBlob, options) + if err != nil { + return err + } + + return b.WaitForCopy(copyID) +} + +// StartCopy starts a blob copy operation. +// sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) 
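A sketch of the conditional copy options defined above. Note that the destination conditions field really is spelled `Destiny` in this version of the SDK, so the example keeps that name.

```go
package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// conditionalCopy copies only when the source changed after `since`
// and the destination blob does not exist yet (If-None-Match: *).
func conditionalCopy(dst *storage.Blob, srcURL string, since time.Time) error {
	opts := &storage.CopyOptions{
		Source:  storage.CopyOptionsConditions{IfModifiedSince: &since},
		Destiny: storage.CopyOptionsConditions{IfNoneMatch: "*"},
	}
	return dst.Copy(srcURL, opts)
}
```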
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + // source + headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) + headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) + //destiny + headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +// AbortCopyOptions includes the options for an abort blob operation +type AbortCopyOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. +// copyID is generated from StartBlobCopy function. +// currentLeaseID is required IF the destination blob has an active lease on it. 
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob +func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error { + params := url.Values{ + "comp": {"copy"}, + "copyid": {copyID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-action"] = "abort" + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// WaitForCopy loops until a BlobCopy operation is completed (or fails with error) +func (b *Blob) WaitForCopy(copyID string) error { + for { + err := b.GetProperties(nil) + if err != nil { + return err + } + + if b.Properties.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch b.Properties.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus) + } + } +} + +// IncrementalCopyBlob copies a snapshot of a source blob to the referring blob. +// sourceBlob parameter must be a valid snapshot URL of the original blob. +// The original blob must be public, or accessed via a Shared Access Signature. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob . 
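A sketch of driving `StartCopy`/`AbortCopy` directly. `WaitForCopy` above polls `GetProperties` in a tight loop with no sleep, so callers who want a bounded, throttled wait can do the polling themselves:

```go
package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// copyWithDeadline polls the copy status itself and aborts the
// server-side copy if it does not finish within maxWait.
func copyWithDeadline(dst *storage.Blob, srcURL string, maxWait time.Duration) error {
	copyID, err := dst.StartCopy(srcURL, nil)
	if err != nil {
		return err
	}
	deadline := time.Now().Add(maxWait)
	for time.Now().Before(deadline) {
		if err := dst.GetProperties(nil); err != nil {
			return err
		}
		if dst.Properties.CopyStatus != "pending" {
			return nil // finished; inspect CopyStatus for success/aborted/failed
		}
		time.Sleep(2 * time.Second)
	}
	return dst.AbortCopy(copyID, nil)
}
```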
+func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { + params := url.Values{"comp": {"incrementalcopy"}} + + // need formatting to 7 decimal places so it's friendly to Windows and *nix + snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") + u, err := url.Parse(sourceBlobURL) + if err != nil { + return "", err + } + query := u.Query() + query.Add("snapshot", snapshotTimeFormatted) + encodedQuery := query.Encode() + encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) + u.RawQuery = encodedQuery + snapshotURL := u.String() + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = snapshotURL + + if options != nil { + addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) + } + + // get URI of destination blob + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go new file mode 100644 index 00000000000..189e0380244 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -0,0 +1,238 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "net/http" + "net/url" + "sync" +) + +// Directory represents a directory on a share. +type Directory struct { + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties DirectoryProperties + share *Share +} + +// DirectoryProperties contains various properties of a directory. +type DirectoryProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` +} + +// ListDirsAndFilesParameters defines the set of customizable parameters to +// make a List Files and Directories call. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +type ListDirsAndFilesParameters struct { + Prefix string + Marker string + MaxResults uint + Timeout uint +} + +// DirsAndFilesListResponse contains the response fields from +// a List Files and Directories call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +type DirsAndFilesListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Marker string `xml:"Marker"` + MaxResults int64 `xml:"MaxResults"` + Directories []Directory `xml:"Entries>Directory"` + Files []File `xml:"Entries>File"` + NextMarker string `xml:"NextMarker"` +} + +// builds the complete directory path for this directory object. +func (d *Directory) buildPath() string { + path := "" + current := d + for current.Name != "" { + path = "/" + current.Name + path + current = current.parent + } + return d.share.buildPath() + path +} + +// Create this directory in the associated share. +// If a directory with the same name already exists, the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) Create(options *FileRequestOptions) error { + // if this is the root directory exit early + if d.parent == nil { + return nil + } + + params := prepareOptions(options) + headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this directory under the associated share if the +// directory does not exist. Returns true if the directory is newly created or +// false if the directory already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + // if this is the root directory exit early + if d.parent == nil { + return false, nil + } + + params := prepareOptions(options) + resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + if resp.statusCode == http.StatusCreated { + d.updateEtagAndLastModified(resp.headers) + return true, nil + } + + return false, d.FetchAttributes(nil) + } + } + + return false, err +} + +// Delete removes this directory. It must be empty in order to be deleted. +// If the directory does not exist the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) Delete(options *FileRequestOptions) error { + return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options) +} + +// DeleteIfExists removes this directory if it exists. 
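A sketch of the directory API above. `GetShareReference` and `GetRootDirectoryReference` are assumed from share.go, which this diff does not include.

```go
package main

import "github.com/Azure/azure-sdk-for-go/storage"

// makeLogsDir creates backups/logs on the file share if needed.
func makeLogsDir(client storage.Client) (*storage.Directory, error) {
	fsc := client.GetFileService()
	root := fsc.GetShareReference("backups").GetRootDirectoryReference()
	dir := root.GetDirectoryReference("logs")
	if _, err := dir.CreateIfNotExists(nil); err != nil {
		return nil, err
	}
	return dir, nil
}
```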
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this directory exists. +func (d *Directory) Exists() (bool, error) { + exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) + if exists { + d.updateEtagAndLastModified(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata for this directory. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties +func (d *Directory) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + d.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetDirectoryReference returns a child Directory object for this directory. +func (d *Directory) GetDirectoryReference(name string) *Directory { + return &Directory{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + } +} + +// GetFileReference returns a child File object for this directory. +func (d *Directory) GetFileReference(name string) *File { + return &File{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + mutex: &sync.Mutex{}, + } +} + +// ListDirsAndFiles returns a list of files and directories under this directory. +// It also contains a pagination token and other response details. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { + q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) + + resp, err := d.fsc.listContent(d.buildPath(), q, nil) + if err != nil { + return nil, err + } + + defer resp.body.Close() + var out DirsAndFilesListResponse + err = xmlUnmarshal(resp.body, &out) + return &out, err +} + +// SetMetadata replaces the metadata for this directory. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetDirectoryMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata +func (d *Directory) SetMetadata(options *FileRequestOptions) error { + headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (d *Directory) updateEtagAndLastModified(headers http.Header) { + d.Properties.Etag = headers.Get("Etag") + d.Properties.LastModified = headers.Get("Last-Modified") +} + +// URL gets the canonical URL to this directory. 
+// This method does not create a publicly accessible URL if the directory +// is private and this method does not check if the directory exists. +func (d *Directory) URL() string { + return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go new file mode 100644 index 00000000000..9668ea66949 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -0,0 +1,453 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/satori/uuid" +) + +// Annotating as secure for gas scanning +/* #nosec */ +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + etagErrorTemplate = "Etag didn't match: %v" +) + +var ( + errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") + errNilPreviousResult = errors.New("The previous results page is nil") + errNilNextLink = errors.New("There are no more pages in this query results") +) + +// Entity represents an entity inside an Azure table. +type Entity struct { + Table *Table + PartitionKey string + RowKey string + TimeStamp time.Time + OdataMetadata string + OdataType string + OdataID string + OdataEtag string + OdataEditLink string + Properties map[string]interface{} +} + +// GetEntityReference returns an Entity object with the specified +// partition key and row key. +func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { + return &Entity{ + PartitionKey: partitionKey, + RowKey: rowKey, + Table: t, + } +} + +// EntityOptions includes options for entity operations. +type EntityOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetEntityOptions includes options for a get entity operation +type GetEntityOptions struct { + Select []string + RequestID string `header:"x-ms-client-request-id"` +} + +// Get gets the referenced entity. Which properties to get can be +// specified using the select option. 
+// See: +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { + if ml == EmptyPayload { + return errEmptyPayload + } + // RowKey and PartitionKey could be lost if not included in the query + // As those are the entity identifiers, it is best if they are not lost + rk := e.RowKey + pk := e.PartitionKey + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := e.Table.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + if options != nil { + if len(options.Select) > 0 { + query.Add("$select", strings.Join(options.Select, ",")) + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, e) + if err != nil { + return err + } + e.PartitionKey = pk + e.RowKey = rk + + return nil +} + +// Insert inserts the referenced entity in its table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. +// ml determines the level of detail of metadata in the operation response, +// or no data at all. +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity +func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, ml) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer resp.body.Close() + + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + + if ml != EmptyPayload { + if err = checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + if err = e.UnmarshalJSON(data); err != nil { + return err + } + } else { + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + } + + return nil +} + +// Update updates the contents of an entity. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or if the ETag is different +// than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 +func (e *Entity) Update(force bool, options *EntityOptions) error { + return e.updateMerge(force, http.MethodPut, options) +} + +// Merge merges the contents of entity specified with PartitionKey and RowKey +// with the content specified in Properties. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. 
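A round-trip sketch for the entity operations above. `GetTableReference` and the `FullMetadata` metadata level are assumed from table.go, which this diff does not include; `EmptyPayload` is referenced by the code in this file.

```go
package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// putAndGet inserts an entity and reads one property back.
func putAndGet(tsc storage.TableServiceClient) error {
	table := tsc.GetTableReference("sessions")
	e := table.GetEntityReference("tenant-1", "user-42")
	e.Properties = map[string]interface{}{
		"Visits":   int64(3), // marshalled with an Edm.Int64 odata annotation
		"LastSeen": time.Now().UTC(),
	}
	if err := e.Insert(storage.EmptyPayload, nil); err != nil {
		return err
	}
	// Re-read a single property; PartitionKey/RowKey are restored by Get.
	return e.Get(30, storage.FullMetadata, &storage.GetEntityOptions{
		Select: []string{"Visits"},
	})
}
```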
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity +func (e *Entity) Merge(force bool, options *EntityOptions) error { + return e.updateMerge(force, "MERGE", options) +} + +// Delete deletes the entity. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 +func (e *Entity) Delete(force bool, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + if resp.statusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateTimestamp(resp.headers) +} + +// InsertOrReplace inserts an entity or replaces the existing one. +// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity +func (e *Entity) InsertOrReplace(options *EntityOptions) error { + return e.insertOr(http.MethodPut, options) +} + +// InsertOrMerge inserts an entity or merges the existing one. +// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity +func (e *Entity) InsertOrMerge(options *EntityOptions) error { + return e.insertOr("MERGE", options) +} + +func (e *Entity) buildPath() string { + return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) +} + +// MarshalJSON is a custom marshaller for entity +func (e *Entity) MarshalJSON() ([]byte, error) { + completeMap := map[string]interface{}{} + completeMap[partitionKeyNode] = e.PartitionKey + completeMap[rowKeyNode] = e.RowKey + for k, v := range e.Properties { + typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") + switch t := v.(type) { + case []byte: + completeMap[typeKey] = OdataBinary + completeMap[k] = string(t) + case time.Time: + completeMap[typeKey] = OdataDateTime + completeMap[k] = t.Format(time.RFC3339Nano) + case uuid.UUID: + completeMap[typeKey] = OdataGUID + completeMap[k] = t.String() + case int64: + completeMap[typeKey] = OdataInt64 + completeMap[k] = fmt.Sprintf("%v", v) + default: + completeMap[k] = v + } + if strings.HasSuffix(k, OdataTypeSuffix) { + if !(completeMap[k] == OdataBinary || + completeMap[k] == OdataDateTime || + completeMap[k] == OdataGUID || + completeMap[k] == OdataInt64) { + return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) + } + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + if _, ok := completeMap[valueKey]; !ok { + return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) + } + } + } + return json.Marshal(completeMap) +} + +// UnmarshalJSON is a custom unmarshaller for entities +func (e *Entity) UnmarshalJSON(data []byte) error { + errorTemplate := "Deserializing error: %v" + + props := map[string]interface{}{} + err := json.Unmarshal(data, &props) + if err != nil { + return err + } + + // deselialize 
metadata + e.OdataMetadata = stringFromMap(props, "odata.metadata") + e.OdataType = stringFromMap(props, "odata.type") + e.OdataID = stringFromMap(props, "odata.id") + e.OdataEtag = stringFromMap(props, "odata.etag") + e.OdataEditLink = stringFromMap(props, "odata.editLink") + e.PartitionKey = stringFromMap(props, partitionKeyNode) + e.RowKey = stringFromMap(props, rowKeyNode) + + // deserialize timestamp + timeStamp, ok := props["Timestamp"] + if ok { + str, ok := timeStamp.(string) + if !ok { + return fmt.Errorf(errorTemplate, "Timestamp casting error") + } + t, err := time.Parse(time.RFC3339Nano, str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + e.TimeStamp = t + } + delete(props, "Timestamp") + delete(props, "Timestamp@odata.type") + + // deserialize entity (user defined fields) + for k, v := range props { + if strings.HasSuffix(k, OdataTypeSuffix) { + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + str, ok := props[valueKey].(string) + if !ok { + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) + } + switch v { + case OdataBinary: + props[valueKey] = []byte(str) + case OdataDateTime: + t, err := time.Parse("2006-01-02T15:04:05Z", str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = t + case OdataGUID: + props[valueKey] = uuid.FromStringOrNil(str) + case OdataInt64: + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = i + default: + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) + } + delete(props, k) + } + } + + e.Properties = props + return nil +} + +func getAndDelete(props map[string]interface{}, key string) interface{} { + if value, ok := props[key]; ok { + delete(props, key) + return value + } + return nil +} + +func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { + if force { + h[headerIfMatch] = "*" + } else { + h[headerIfMatch] = etag + } + return h +} + +// updates Etag and timestamp +func (e *Entity) updateEtagAndTimestamp(headers http.Header) error { + e.OdataEtag = headers.Get(headerEtag) + return e.updateTimestamp(headers) +} + +func (e *Entity) updateTimestamp(headers http.Header) error { + str := headers.Get(headerDate) + t, err := time.Parse(time.RFC1123, str) + if err != nil { + return fmt.Errorf("Update timestamp error: %v", err) + } + e.TimeStamp = t + return nil +} + +func (e *Entity) insertOr(verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.headers) +} + +func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + 
headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + if resp.statusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.headers) +} + +func stringFromMap(props map[string]interface{}, key string) string { + value := getAndDelete(props, key) + if value != nil { + return value.(string) + } + return "" +} + +func (options *EntityOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + query = addTimeout(query, options.Timeout) + headers = headersFromStruct(*options) + } + return query, headers +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 00000000000..5fb516c55fd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,476 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "sync" +) + +const fourMB = uint64(4194304) +const oneTB = uint64(1099511627776) + +// File represents a file on a share. +type File struct { + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties FileProperties `xml:"Properties"` + share *Share + FileCopyProperties FileCopyState + mutex *sync.Mutex +} + +// FileProperties contains various properties of a file. +type FileProperties struct { + CacheControl string `header:"x-ms-cache-control"` + Disposition string `header:"x-ms-content-disposition"` + Encoding string `header:"x-ms-content-encoding"` + Etag string + Language string `header:"x-ms-content-language"` + LastModified string + Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` + MD5 string `header:"x-ms-content-md5"` + Type string `header:"x-ms-content-type"` +} + +// FileCopyState contains various properties of a file copy operation. +type FileCopyState struct { + CompletionTime string + ID string `header:"x-ms-copy-id"` + Progress string + Source string + Status string `header:"x-ms-copy-status"` + StatusDesc string +} + +// FileStream contains file data returned from a call to GetFile. +type FileStream struct { + Body io.ReadCloser + ContentMD5 string +} + +// FileRequestOptions will be passed to misc file operations. +// Currently just Timeout (in seconds) but could expand. +type FileRequestOptions struct { + Timeout uint // timeout duration in seconds. 
+} + +func prepareOptions(options *FileRequestOptions) url.Values { + params := url.Values{} + if options != nil { + params = addTimeout(params, options.Timeout) + } + return params +} + +// FileRanges contains a list of file range information for a file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +type FileRanges struct { + ContentLength uint64 + LastModified string + ETag string + FileRanges []FileRange `xml:"Range"` +} + +// FileRange contains range information for a file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +type FileRange struct { + Start uint64 `xml:"Start"` + End uint64 `xml:"End"` +} + +func (fr FileRange) String() string { + return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) +} + +// builds the complete file path for this file object +func (f *File) buildPath() string { + return f.parent.buildPath() + "/" + f.Name +} + +// ClearRange releases the specified range of space in a file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error { + var timeout *uint + if options != nil { + timeout = &options.Timeout + } + headers, err := f.modifyRange(nil, fileRange, timeout, nil) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil +} + +// Create creates a new file or replaces an existing one. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File +func (f *File) Create(maxSize uint64, options *FileRequestOptions) error { + if maxSize > oneTB { + return fmt.Errorf("max file size is 1TB") + } + params := prepareOptions(options) + headers := headersFromStruct(f.Properties) + headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10) + headers["x-ms-type"] = "file" + + outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated}) + if err != nil { + return err + } + + f.Properties.Length = maxSize + f.updateEtagAndLastModified(outputHeaders) + return nil +} + +// CopyFile operation copies a file/blob from the sourceURL to the path provided. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file +func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { + extraHeaders := map[string]string{ + "x-ms-type": "file", + "x-ms-copy-source": sourceURL, + } + params := prepareOptions(options) + + headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") + f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") + return nil +} + +// Delete immediately removes this file from the storage account. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) Delete(options *FileRequestOptions) error { + return f.fsc.deleteResource(f.buildPath(), resourceFile, options) +} + +// DeleteIfExists removes this file if it exists. 
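+//
+// For example (editorial sketch; "file" is an assumed *File reference):
+//
+//	deleted, err := file.DeleteIfExists(nil)
+//	if err == nil && !deleted {
+//		// the file did not exist in the first place
+//	}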
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// GetFileOptions includes options for a get file operation +type GetFileOptions struct { + Timeout uint + GetContentMD5 bool +} + +// DownloadToStream operation downloads the file. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { + params := prepareOptions(options) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + readAndCloseBody(resp.body) + return nil, err + } + return resp.body, nil +} + +// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { + extraHeaders := map[string]string{ + "Range": fileRange.String(), + } + params := url.Values{} + if options != nil { + if options.GetContentMD5 { + if isRangeTooBig(fileRange) { + return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") + } + extraHeaders["x-ms-range-get-content-md5"] = "true" + } + params = addTimeout(params, options.Timeout) + } + + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) + if err != nil { + return fs, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { + readAndCloseBody(resp.body) + return fs, err + } + + fs.Body = resp.body + if options != nil && options.GetContentMD5 { + fs.ContentMD5 = resp.headers.Get("Content-MD5") + } + return fs, nil +} + +// Exists returns true if this file exists. +func (f *File) Exists() (bool, error) { + exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) + if exists { + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + } + return exists, err +} + +// FetchAttributes updates metadata and properties for this file. 
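+//
+// For example (editorial sketch; "f" is an assumed *File reference):
+//
+//	if err := f.FetchAttributes(nil); err == nil {
+//		fmt.Printf("%s is %d bytes (%s)\n", f.Name, f.Properties.Length, f.Properties.Type)
+//	}
+//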
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties +func (f *File) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + f.Metadata = getMetadataFromHeaders(headers) + return nil +} + +// returns true if the range is larger than 4MB +func isRangeTooBig(fileRange FileRange) bool { + if fileRange.End-fileRange.Start > fourMB { + return true + } + + return false +} + +// ListRangesOptions includes options for a list file ranges operation +type ListRangesOptions struct { + Timeout uint + ListRange *FileRange +} + +// ListRanges returns the list of valid ranges for this file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { + params := url.Values{"comp": {"rangelist"}} + + // add optional range to list + var headers map[string]string + if options != nil { + params = addTimeout(params, options.Timeout) + if options.ListRange != nil { + headers = make(map[string]string) + headers["Range"] = options.ListRange.String() + } + } + + resp, err := f.fsc.listContent(f.buildPath(), params, headers) + if err != nil { + return nil, err + } + + defer resp.body.Close() + var cl uint64 + cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) + if err != nil { + ioutil.ReadAll(resp.body) + return nil, err + } + + var out FileRanges + out.ContentLength = cl + out.ETag = resp.headers.Get("ETag") + out.LastModified = resp.headers.Get("Last-Modified") + + err = xmlUnmarshal(resp.body, &out) + return &out, err +} + +// modifies a range of bytes in this file +func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { + if err := f.fsc.checkForStorageEmulator(); err != nil { + return nil, err + } + if fileRange.End < fileRange.Start { + return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if bytes != nil && isRangeTooBig(fileRange) { + return nil, errors.New("range cannot exceed 4MB in size") + } + + params := url.Values{"comp": {"range"}} + if timeout != nil { + params = addTimeout(params, *timeout) + } + + uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) + + // default to clear + write := "clear" + cl := uint64(0) + + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (fileRange.End - fileRange.Start) + 1 + } + + extraHeaders := map[string]string{ + "Content-Length": strconv.FormatUint(cl, 10), + "Range": fileRange.String(), + "x-ms-write": write, + } + + if contentMD5 != nil { + extraHeaders["Content-MD5"] = *contentMD5 + } + + headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) + resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// SetMetadata replaces the metadata for this file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetFileMetadata. 
HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata +func (f *File) SetMetadata(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil +} + +// SetProperties sets system properties on this file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetFileProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties +func (f *File) SetProperties(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (f *File) updateEtagAndLastModified(headers http.Header) { + f.Properties.Etag = headers.Get("Etag") + f.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates file properties from the specified HTTP header +func (f *File) updateProperties(header http.Header) { + size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) + if err == nil { + f.Properties.Length = size + } + + f.updateEtagAndLastModified(header) + f.Properties.CacheControl = header.Get("Cache-Control") + f.Properties.Disposition = header.Get("Content-Disposition") + f.Properties.Encoding = header.Get("Content-Encoding") + f.Properties.Language = header.Get("Content-Language") + f.Properties.MD5 = header.Get("Content-MD5") + f.Properties.Type = header.Get("Content-Type") +} + +// URL gets the canonical URL to this file. +// This method does not create a publicly accessible URL if the file +// is private and this method does not check if the file exists. +func (f *File) URL() string { + return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) +} + +// WriteRangeOptions includes options for a write file range operation +type WriteRangeOptions struct { + Timeout uint + ContentMD5 string +} + +// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside +// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with +// a maximum size of 4MB. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { + if bytes == nil { + return errors.New("bytes cannot be nil") + } + var timeout *uint + var md5 *string + if options != nil { + timeout = &options.Timeout + md5 = &options.ContentMD5 + } + + headers, err := f.modifyRange(bytes, fileRange, timeout, md5) + if err != nil { + return err + } + // it's perfectly legal for multiple go routines to call WriteRange + // on the same *File (e.g. concurrently writing non-overlapping ranges) + // so we must take the file mutex before updating our properties. 
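+ //
+ // For example (editorial sketch; r1 and r2 are placeholder io.Readers,
+ // each supplying exactly 4 MiB), two goroutines may write disjoint
+ // halves of an 8 MiB file concurrently:
+ //
+ //	go func() { _ = f.WriteRange(r1, FileRange{Start: 0, End: 4*1024*1024 - 1}, nil) }()
+ //	go func() { _ = f.WriteRange(r2, FileRange{Start: 4 * 1024 * 1024, End: 8*1024*1024 - 1}, nil) }()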
+ f.mutex.Lock() + f.updateEtagAndLastModified(headers) + f.mutex.Unlock() + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go new file mode 100644 index 00000000000..295e3d3e25c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -0,0 +1,338 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client + auth authentication +} + +// ListSharesParameters defines the set of customizable parameters to make a +// List Shares call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares +type ListSharesParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// ShareListResponse contains the response fields from +// ListShares call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares +type ShareListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Shares []Share `xml:"Shares>Share"` +} + +type compType string + +const ( + compNone compType = "" + compList compType = "list" + compMetadata compType = "metadata" + compProperties compType = "properties" + compRangeList compType = "rangelist" +) + +func (ct compType) String() string { + return string(ct) +} + +type resourceType string + +const ( + resourceDirectory resourceType = "directory" + resourceFile resourceType = "" + resourceShare resourceType = "share" +) + +func (rt resourceType) String() string { + return string(rt) +} + +func (p ListSharesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func (p ListDirsAndFilesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + out = addTimeout(out, p.Timeout) + + return out +} + +// returns url.Values for the specified types +func getURLInitValues(comp compType, res resourceType) url.Values { + values := url.Values{} + if comp != compNone { + values.Set("comp", 
comp.String()) + } + if res != resourceFile { + values.Set("restype", res.String()) + } + return values +} + +// GetShareReference returns a Share object for the specified share name. +func (f *FileServiceClient) GetShareReference(name string) *Share { + return &Share{ + fsc: f, + Name: name, + Properties: ShareProperties{ + Quota: -1, + }, + } +} + +// ListShares returns the list of shares in a storage account along with +// pagination token and other response details. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares +func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + + var out ShareListResponse + resp, err := f.listContent("", q, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + + // assign our client to the newly created Share objects + for i := range out.Shares { + out.Shares[i].fsc = &f + } + return &out, err +} + +// GetServiceProperties gets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties +func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return f.client.getServiceProperties(fileServiceName, f.auth) +} + +// SetServiceProperties sets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties +func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { + return f.client.setServiceProperties(props, fileServiceName, f.auth) +} + +// retrieves directory or share content +func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + readAndCloseBody(resp.body) + return nil, err + } + + return resp, nil +} + +// returns true if the specified resource exists +func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return false, nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, resp.headers, nil + } + } + return false, nil, err +} + +// creates a resource depending on the specified resource type +func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { + resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) + if 
err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes) +} + +// creates a resource depending on the specified resource type, doesn't close the response body +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + combinedParams := mergeParams(values, urlParams) + uri := f.client.getEndpoint(fileServiceName, path, combinedParams) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) +} + +// returns HTTP header data for the specified directory or share +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// gets the specified resource, doesn't close the response body +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params = mergeParams(params, getURLInitValues(comp, res)) + uri := f.client.getEndpoint(fileServiceName, path, params) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(verb, uri, headers, nil, f.auth) +} + +// deletes the resource and returns the response +func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { + resp, err := f.deleteResourceNoClose(path, res, options) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// deletes the resource and returns the response, doesn't close the response body +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) + uri := f.client.getEndpoint(fileServiceName, path, values) + return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) +} + +// merges metadata into extraHeaders and returns extraHeaders +func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { + if metadata == nil && extraHeaders == nil { + return nil + } + if extraHeaders == nil { + extraHeaders = make(map[string]string) + } + for k, v := range metadata { + extraHeaders[userDefinedMetadataHeaderPrefix+k] = v + } + return extraHeaders +} + +// sets extra header data for the specified resource +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + 
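+ // build the target URI from the comp/restype values plus any user-supplied
+ // timeout, then PUT the merged standard and extra headers to it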
+ params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// checkForStorageEmulator determines if the client is set up for use with the +// Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go new file mode 100644 index 00000000000..3d9d52d8e35 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -0,0 +1,201 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "net/http" + "net/url" + "strconv" + "time" +) + +// lease constants. +const ( + leaseHeaderPrefix = "x-ms-lease-" + headerLeaseID = "x-ms-lease-id" + leaseAction = "x-ms-lease-action" + leaseBreakPeriod = "x-ms-lease-break-period" + leaseDuration = "x-ms-lease-duration" + leaseProposedID = "x-ms-proposed-lease-id" + leaseTime = "x-ms-lease-time" + + acquireLease = "acquire" + renewLease = "renew" + changeLease = "change" + releaseLease = "release" + breakLease = "break" +) + +// leaseCommonPut is common PUT code for the various acquire/release/break etc. functions. 
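+// The caller is expected to have already set the x-ms-lease-action header
+// (and any action-specific headers); on success the raw response headers are
+// returned so callers can extract values such as the lease ID or break period.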
+func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// LeaseOptions includes options for all operations regarding leasing blobs +type LeaseOptions struct { + Timeout uint + Origin string `header:"Origin"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AcquireLease creates a lease for a blob +// returns leaseID acquired +// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum +// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = acquireLease + + if leaseTimeInSeconds == -1 { + // Do nothing, but don't trigger the following clauses. + } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { + leaseTimeInSeconds = 60 + } else if leaseTimeInSeconds < 15 { + leaseTimeInSeconds = 15 + } + + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + + if proposedLeaseID != "" { + headers[leaseProposedID] = proposedLeaseID + } + + respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob +// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(headers, options) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
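+// For example (editorial sketch), a break period of 0 asks the service to
+// make the lease available for re-acquisition immediately.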
+// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(headers, options) +} + +// breakLeaseCommon is common code for both version of BreakLease (with and without break period) +func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob +// Returns the new LeaseID acquired +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[headerLeaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go new file mode 100644 index 00000000000..7d9038a5f71 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -0,0 +1,167 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "time" +) + +// Message represents an Azure message. +type Message struct { + Queue *Queue + Text string `xml:"MessageText"` + ID string `xml:"MessageId"` + Insertion TimeRFC1123 `xml:"InsertionTime"` + Expiration TimeRFC1123 `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + NextVisible TimeRFC1123 `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` +} + +func (m *Message) buildPath() string { + return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID) +} + +// PutMessageOptions is the set of options that can be specified for the Put +// Message operation. A zero struct does not use any preferences for the request. +type PutMessageOptions struct { + Timeout uint + VisibilityTimeout int + MessageTTL int + RequestID string `header:"x-ms-client-request-id"` +} + +// Put operation adds a new message to the back of the message queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message +func (m *Message) Put(options *PutMessageOptions) error { + query := url.Values{} + headers := m.Queue.qsc.client.getStandardHeaders() + + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + if options.MessageTTL != 0 { + query.Set("messagettl", strconv.Itoa(options.MessageTTL)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query) + resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + err = xmlUnmarshal(resp.body, m) + if err != nil { + return err + } + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// UpdateMessageOptions is the set of options that can be specified for the Update +// Message operation. A zero struct does not use any preferences for the request. +type UpdateMessageOptions struct { + Timeout uint + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` +} + +// Update operation updates the specified message. 
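+//
+// A message can only be updated with the pop receipt returned by a previous
+// Get, so a typical flow (editorial sketch) is:
+//
+//	msgs, err := q.GetMessages(&GetMessagesOptions{NumOfMessages: 1})
+//	if err == nil && len(msgs) > 0 {
+//		m := msgs[0]
+//		m.Text = "updated text"
+//		err = m.Update(&UpdateMessageOptions{VisibilityTimeout: 30})
+//	}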
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message +func (m *Message) Update(options *UpdateMessageOptions) error { + query := url.Values{} + if m.PopReceipt != "" { + query.Set("popreceipt", m.PopReceipt) + } + + headers := m.Queue.qsc.client.getStandardHeaders() + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query) + + resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + m.PopReceipt = resp.headers.Get("x-ms-popreceipt") + nextTimeStr := resp.headers.Get("x-ms-time-next-visible") + if nextTimeStr != "" { + nextTime, err := time.Parse(time.RFC1123, nextTimeStr) + if err != nil { + return err + } + m.NextVisible = TimeRFC1123(nextTime) + } + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// Delete operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (m *Message) Delete(options *QueueServiceOptions) error { + params := url.Values{"popreceipt": {m.PopReceipt}} + headers := m.Queue.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) + + resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go new file mode 100644 index 00000000000..800adf129dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -0,0 +1,47 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// MetadataLevel determines if operations should return a payload, +// and its level of detail. 
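+// For example (editorial sketch), FullMetadata requests every OData
+// annotation on returned entities, NoMetadata suppresses them, and
+// EmptyPayload (used with addReturnContentHeaders) asks the service to
+// return no body at all.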
+type MetadataLevel string + +// These consts are meant to help with the supported OData operations +const ( + OdataTypeSuffix = "@odata.type" + + // Types + + OdataBinary = "Edm.Binary" + OdataDateTime = "Edm.DateTime" + OdataGUID = "Edm.Guid" + OdataInt64 = "Edm.Int64" + + // Query options + + OdataFilter = "$filter" + OdataOrderBy = "$orderby" + OdataTop = "$top" + OdataSkip = "$skip" + OdataCount = "$count" + OdataExpand = "$expand" + OdataSelect = "$select" + OdataSearch = "$search" + + EmptyPayload MetadataLevel = "" + NoMetadata MetadataLevel = "application/json;odata=nometadata" + MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata" + FullMetadata MetadataLevel = "application/json;odata=fullmetadata" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go new file mode 100644 index 00000000000..c59fd4b50b9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -0,0 +1,204 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +// GetPageRangesResponse contains the response fields from +// Get Page Ranges call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// PutPageOptions includes the options for a put page operation +type PutPageOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"` + IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"` + IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// WriteRange writes a range of pages to a page blob. +// Ranges must be aligned with 512-byte boundaries and the chunk size must +// be a multiple of 512. 
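+//
+// For example (editorial sketch; "blob" is an assumed, already created page
+// *Blob), writing a single zeroed 512-byte page:
+//
+//	page := make([]byte, 512)
+//	err := blob.WriteRange(BlobRange{Start: 0, End: 511}, bytes.NewReader(page), nil)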
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page +func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { + if bytes == nil { + return errors.New("bytes cannot be nil") + } + return b.modifyRange(blobRange, bytes, options) +} + +// ClearRange clears the given range in a page blob. +// Ranges must be aligned with 512-byte boundaries and the chunk size must +// be a multiple of 512. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page +func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { + return b.modifyRange(blobRange, nil, options) +} + +func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { + if blobRange.End < blobRange.Start { + return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if blobRange.Start%512 != 0 { + return errors.New("the value for rangeStart must be a multiple of 512") + } + if blobRange.End%512 != 511 { + return errors.New("the value for rangeEnd must be one less than a multiple of 512") + } + + params := url.Values{"comp": {"page"}} + + // default to clear + write := "clear" + var cl uint64 + + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (blobRange.End - blobRange.Start) + 1 + } + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = write + headers["x-ms-range"] = blobRange.String() + headers["Content-Length"] = fmt.Sprintf("%v", cl) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetPageRangesOptions includes the options for a get page ranges operation +type GetPageRangesOptions struct { + Timeout uint + Snapshot *time.Time + PreviousSnapshot *time.Time + Range *BlobRange + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPageRanges returns the list of valid page ranges for a page blob. 
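+//
+// For example (editorial sketch), totaling the bytes currently committed:
+//
+//	resp, err := blob.GetPageRanges(nil)
+//	if err == nil {
+//		var used int64
+//		for _, pr := range resp.PageList {
+//			used += pr.End - pr.Start + 1
+//		}
+//	}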
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges +func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { + params := url.Values{"comp": {"pagelist"}} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + if options.PreviousSnapshot != nil { + params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot)) + } + if options.Range != nil { + headers["Range"] = options.Range.String() + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out GetPageRangesResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See CreateBlockBlobFromReader for more info on creating blobs. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutPageBlob(options *PutBlobOptions) error { + if b.Properties.ContentLength%512 != 0 { + return errors.New("Content length must be aligned to a 512-byte boundary") + } + + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) + headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypePage) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go new file mode 100644 index 00000000000..499592ebd12 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -0,0 +1,441 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
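+
+// Usage sketch (assumed API, not shown in this hunk): a *Queue is normally
+// obtained from the queue service client, e.g.
+// cli.GetQueueService().GetQueueReference("myqueue"), where "cli" is a
+// pre-authenticated storage Client.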
+ +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +const ( + // casing is per Golang's http.Header canonicalizing the header names. + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" +) + +// QueueAccessPolicy represents each access policy in the queue ACL. +type QueueAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAdd bool + CanUpdate bool + CanProcess bool +} + +// QueuePermissions represents the queue ACLs. +type QueuePermissions struct { + AccessPolicies []QueueAccessPolicy +} + +// SetQueuePermissionOptions includes options for a set queue permissions operation +type SetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// Queue represents an Azure queue. +type Queue struct { + qsc *QueueServiceClient + Name string + Metadata map[string]string + AproxMessageCount uint64 +} + +func (q *Queue) buildPath() string { + return fmt.Sprintf("/%s", q.Name) +} + +func (q *Queue) buildPathMessages() string { + return fmt.Sprintf("%s/messages", q.buildPath()) +} + +// QueueServiceOptions includes options for some queue service operations +type QueueServiceOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// Create operation creates a queue under the given account. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 +func (q *Queue) Create(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// Delete operation permanently deletes the specified queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 +func (q *Queue) Delete(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// Exists returns true if a queue with given name exists. +func (q *Queue) Exists() (bool, error) { + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) + resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. 
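+//
+// For example (editorial sketch):
+//
+//	q.Metadata = map[string]string{"department": "storage"}
+//	if err := q.SetMetadata(nil); err != nil {
+//		// handle error
+//	}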
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata +func (q *Queue) SetMetadata(options *QueueServiceOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-value pairs. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata +// +// Because of the way Golang's http client (and http.Header in particular) +// canonicalizes header names, the returned metadata names will always +// be all lower case. +func (q *Queue) GetMetadata(options *QueueServiceOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + aproxMessagesStr := resp.headers.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader)) + if aproxMessagesStr != "" { + aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64) + if err != nil { + return err + } + q.AproxMessageCount = aproxMessages + } + + q.Metadata = getMetadataFromHeaders(resp.headers) + return nil +} + +// GetMessageReference returns a message object with the specified text. +func (q *Queue) GetMessageReference(text string) *Message { + return &Message{ + Queue: q, + Text: text, + } +} + +// GetMessagesOptions is the set of options that can be specified for the Get +// Messages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesOptions struct { + Timeout uint + NumOfMessages int + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` +} + +type messages struct { + XMLName xml.Name `xml:"QueueMessagesList"` + Messages []Message `xml:"QueueMessage"` +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. 
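+// Retrieved messages are hidden from other callers for the requested
+// visibility timeout. For example (editorial sketch), dequeuing up to ten
+// messages and hiding them for 60 seconds:
+//
+//	msgs, err := q.GetMessages(&GetMessagesOptions{NumOfMessages: 10, VisibilityTimeout: 60})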
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages +func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) { + query := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + if options.NumOfMessages != 0 { + query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) + } + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) + + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) + if err != nil { + return []Message{}, err + } + defer readAndCloseBody(resp.body) + + var out messages + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return []Message{}, err + } + for i := range out.Messages { + out.Messages[i].Queue = q + } + return out.Messages, nil +} + +// PeekMessagesOptions is the set of options that can be specified for the Peek +// Messages operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesOptions struct { + Timeout uint + NumOfMessages int + RequestID string `header:"x-ms-client-request-id"` +} + +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages +func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) { + query := url.Values{"peekonly": {"true"}} // Required for peek operation + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + if options.NumOfMessages != 0 { + query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) + + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) + if err != nil { + return []Message{}, err + } + defer readAndCloseBody(resp.body) + + var out messages + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return []Message{}, err + } + for i := range out.Messages { + out.Messages[i].Queue = q + } + return out.Messages, nil +} + +// ClearMessages operation deletes all messages from the specified queue.
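+// Clearing an already-empty queue also succeeds (204 No Content).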
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages +func (q *Queue) ClearMessages(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) + + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// SetPermissions sets up queue permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl +func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { + body, length, err := generateQueueACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } + + params := url.Values{ + "comp": {"acl"}, + } + headers := q.qsc.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return errors.New("Unable to set permissions") + } + + return nil +} + +func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, qapd := range policies { + permission := qapd.generateQueuePermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { + // generate the permissions string (raup). + // still want the end user API to have bool flags. 
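+ // For example, CanRead and CanUpdate produce "ru"; the flags are + // emitted in the fixed r, a, u, p order that the service documents.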
+ permissions = "" + + if qapd.CanRead { + permissions += "r" + } + + if qapd.CanAdd { + permissions += "a" + } + + if qapd.CanUpdate { + permissions += "u" + } + + if qapd.CanProcess { + permissions += "p" + } + + return permissions +} + +// GetQueuePermissionOptions includes options for a get queue permissions operation +type GetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl +// If timeout is 0 then it will not be passed to Azure. +func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) { + params := url.Values{ + "comp": {"acl"}, + } + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildQueueAccessPolicy(ap, &resp.headers), nil +} + +func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions { + permissions := QueuePermissions{ + AccessPolicies: []QueueAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + qapd := QueueAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a") + qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") + qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p") + + permissions.AccessPolicies = append(permissions.AccessPolicies, qapd) + } + return &permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go new file mode 100644 index 00000000000..28d9ab937e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go @@ -0,0 +1,146 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// QueueSASOptions are options to construct a queue SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type QueueSASOptions struct { + QueueSASPermissions + SASOptions +} + +// QueueSASPermissions includes the available permissions for +// a queue SAS URI.
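+// For example, a value with Read and Process set is rendered as "rp" by +// buildString below.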
+type QueueSASPermissions struct { + Read bool + Add bool + Update bool + Process bool +} + +func (q QueueSASPermissions) buildString() string { + permissions := "" + + if q.Read { + permissions += "r" + } + if q.Add { + permissions += "a" + } + if q.Update { + permissions += "u" + } + if q.Process { + permissions += "p" + } + return permissions +} + +// GetSASURI creates a URL to the specified queue which contains the Shared +// Access Signature with the specified permissions and expiration time. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) { + canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true) + if err != nil { + return "", err + } + + // "The canonicalizedresource portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded." + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). + canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + signedStart := "" + if options.Start != (time.Time{}) { + signedStart = options.Start.UTC().Format(time.RFC3339) + } + signedExpiry := options.Expiry.UTC().Format(time.RFC3339) + + protocols := "https,http" + if options.UseHTTPS { + protocols = "https" + } + + permissions := options.QueueSASPermissions.buildString() + stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier) + if err != nil { + return "", err + } + + sig := q.qsc.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {q.qsc.client.apiVersion}, + "se": {signedExpiry}, + "sp": {permissions}, + "sig": {sig}, + } + + if q.qsc.client.apiVersion >= "2015-04-05" { + sasParams.Add("spr", protocols) + addQueryParameter(sasParams, "sip", options.IP) + } + + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil) + sasURL, err := url.Parse(uri) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) { + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/queue" + canonicalizedResource + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + signedPermissions, + signedStart, + signedExpiry, + canonicalizedResource, + signedIdentifier, + signedIP, + protocols, + signedVersion), nil + + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git
a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go new file mode 100644 index 00000000000..29febe146f6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go @@ -0,0 +1,42 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's queue service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties +func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return q.client.getServiceProperties(queueServiceName, q.auth) +} + +// SetServiceProperties sets the properties of your storage account's queue service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties +func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error { + return q.client.setServiceProperties(props, queueServiceName, q.auth) +} + +// GetQueueReference returns a Queue object for the specified queue name. +func (q *QueueServiceClient) GetQueueReference(name string) *Queue { + return &Queue{ + qsc: q, + Name: name, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go new file mode 100644 index 00000000000..a14d9d32447 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -0,0 +1,216 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" + "net/url" + "strconv" +) + +// Share represents an Azure file share. +type Share struct { + fsc *FileServiceClient + Name string `xml:"Name"` + Properties ShareProperties `xml:"Properties"` + Metadata map[string]string +} + +// ShareProperties contains various properties of a share. +type ShareProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + Quota int `xml:"Quota"` +} + +// builds the complete path for this share object. +func (s *Share) buildPath() string { + return fmt.Sprintf("/%s", s.Name) +} + +// Create this share under the associated account.
+// If a share with the same name already exists, the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) Create(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this share under the associated account if +// it does not exist. Returns true if the share is newly created or false if +// the share already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + if resp.statusCode == http.StatusCreated { + s.updateEtagAndLastModified(resp.headers) + return true, nil + } + return false, s.FetchAttributes(nil) + } + } + + return false, err +} + +// Delete marks this share for deletion. The share along with any files +// and directories contained within it are later deleted during garbage +// collection. If the share does not exist the operation fails +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) Delete(options *FileRequestOptions) error { + return s.fsc.deleteResource(s.buildPath(), resourceShare, options) +} + +// DeleteIfExists operation marks this share for deletion if it exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this share already exists +// on the storage account, otherwise returns false. +func (s *Share) Exists() (bool, error) { + exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) + if exists { + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata and properties for this share. 
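+// On success, s.Metadata, s.Properties.Etag, s.Properties.LastModified and +// s.Properties.Quota are refreshed from the response headers.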
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties +func (s *Share) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + s.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetRootDirectoryReference returns a Directory object at the root of this share. +func (s *Share) GetRootDirectoryReference() *Directory { + return &Directory{ + fsc: s.fsc, + share: s, + } +} + +// ServiceClient returns the FileServiceClient associated with this share. +func (s *Share) ServiceClient() *FileServiceClient { + return s.fsc +} + +// SetMetadata replaces the metadata for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetShareMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata +func (s *Share) SetMetadata(options *FileRequestOptions) error { + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// SetProperties sets system properties for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetShareProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties +func (s *Share) SetProperties(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + if s.Properties.Quota > 5120 { + return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) + } + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (s *Share) updateEtagAndLastModified(headers http.Header) { + s.Properties.Etag = headers.Get("Etag") + s.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates quota value +func (s *Share) updateQuota(headers http.Header) { + quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) + if err == nil { + s.Properties.Quota = quota + } +} + +// URL gets the canonical URL to this share. This method does not create a publicly accessible +// URL if the share is private and this method does not check if the share exists. 
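+// +// For a share named "data" on account "acct" in the default public cloud +// (endpoint shape assumed), the result looks like +// https://acct.file.core.windows.net/data.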
+func (s *Share) URL() string { + return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go new file mode 100644 index 00000000000..056ab398a81 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -0,0 +1,61 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// AccessPolicyDetailsXML has specifics about an access policy +// annotated with XML details. +type AccessPolicyDetailsXML struct { + StartTime time.Time `xml:"Start"` + ExpiryTime time.Time `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +// SignedIdentifier is a wrapper for a specific policy +type SignedIdentifier struct { + ID string `xml:"Id"` + AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` +} + +// SignedIdentifiers part of the response from GetPermissions call. +type SignedIdentifiers struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +// AccessPolicy is the response type from the GetPermissions call. +type AccessPolicy struct { + SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` +} + +// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the +// AccessPolicy struct which will get converted to XML. +func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { + return SignedIdentifier{ + ID: id, + AccessPolicy: AccessPolicyDetailsXML{ + StartTime: startTime.UTC().Round(time.Second), + ExpiryTime: expiryTime.UTC().Round(time.Second), + Permission: permissions, + }, + } +} + +func updatePermissions(permissions, permission string) bool { + return strings.Contains(permissions, permission) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go new file mode 100644 index 00000000000..c102619c987 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -0,0 +1,131 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "net/http" + "net/url" + "strconv" +) + +// ServiceProperties represents the storage account service properties +type ServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors +} + +// Logging represents the Azure Analytics Logging settings +type Logging struct { + Version string + Delete bool + Read bool + Write bool + RetentionPolicy *RetentionPolicy +} + +// RetentionPolicy indicates if retention is enabled and for how many days +type RetentionPolicy struct { + Enabled bool + Days *int +} + +// Metrics provide request statistics. +type Metrics struct { + Version string + Enabled bool + IncludeAPIs *bool + RetentionPolicy *RetentionPolicy +} + +// Cors includes all the CORS rules +type Cors struct { + CorsRule []CorsRule +} + +// CorsRule includes all settings for a Cors rule +type CorsRule struct { + AllowedOrigins string + AllowedMethods string + MaxAgeInSeconds int + ExposedHeaders string + AllowedHeaders string +} + +func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + headers := c.getStandardHeaders() + + resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var out ServiceProperties + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return nil, err + } + + return &out, nil +} + +func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + + // Ideally, StorageServiceProperties would be the output struct + // This is to avoid golint stuttering, while generating the correct XML + type StorageServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + } + input := StorageServiceProperties{ + Logging: props.Logging, + HourMetrics: props.HourMetrics, + MinuteMetrics: props.MinuteMetrics, + Cors: props.Cors, + } + + body, length, err := xmlMarshal(input) + if err != nil { + return err + } + + headers := c.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := c.exec(http.MethodPut, uri, headers, body, auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go new file mode 100644 index 00000000000..6c01d32ee13 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -0,0 +1,419 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +const ( + tablesURIPath = "/Tables" + nextTableQueryParameter = "NextTableName" + headerNextPartitionKey = "x-ms-continuation-NextPartitionKey" + headerNextRowKey = "x-ms-continuation-NextRowKey" + nextPartitionKeyQueryParameter = "NextPartitionKey" + nextRowKeyQueryParameter = "NextRowKey" +) + +// TableAccessPolicy is used for setting table policies. +type TableAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAppend bool + CanUpdate bool + CanDelete bool +} + +// Table represents an Azure table. +type Table struct { + tsc *TableServiceClient + Name string `json:"TableName"` + OdataEditLink string `json:"odata.editLink"` + OdataID string `json:"odata.id"` + OdataMetadata string `json:"odata.metadata"` + OdataType string `json:"odata.type"` +} + +// EntityQueryResult contains the response from +// QueryEntities and NextResults functions. +type EntityQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Entities []*Entity `json:"value"` + QueryNextLink + table *Table +} + +type continuationToken struct { + NextPartitionKey string + NextRowKey string +} + +func (t *Table) buildPath() string { + return fmt.Sprintf("/%s", t.Name) +} + +func (t *Table) buildSpecificPath() string { + return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) +} + +// Get gets the referenced table. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (t *Table) Get(timeout uint, ml MetadataLevel) error { + if ml == EmptyPayload { + return errEmptyPayload + } + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := t.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query) + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, t) + if err != nil { + return err + } + return nil +} + +// Create creates the referenced table. +// This function fails if the name is not compliant +// with the specification or the table already exists. +// ml determines the level of detail of metadata in the operation response, +// or no data at all.
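+// +// A sketch (tsc is assumed to be a *TableServiceClient from this package): +// +// table := tsc.GetTableReference("mytable") +// err := table.Create(30, EmptyPayload, nil)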
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table +func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + }) + + type createTableRequest struct { + TableName string `json:"TableName"` + } + req := createTableRequest{TableName: t.Name} + buf := new(bytes.Buffer) + if err := json.NewEncoder(buf).Encode(req); err != nil { + return err + } + + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, ml) + headers = addBodyRelatedHeaders(headers, buf.Len()) + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if ml == EmptyPayload { + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + } else { + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + } + + if ml != EmptyPayload { + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(data, t) + if err != nil { + return err + } + } + + return nil +} + +// Delete deletes the referenced table. +// This function fails if the table is not present. +// Be advised: Delete deletes all the entries that may be present. +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table +func (t *Table) Delete(timeout uint, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ + "timeout": {strconv.Itoa(int(timeout))}, + }) + + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, EmptyPayload) + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// QueryOptions includes options for a query entities operation. +// Top, filter and select are OData query options. +type QueryOptions struct { + Top uint + Filter string + Select []string + RequestID string +} + +func (options *QueryOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + if len(options.Select) > 0 { + query.Add(OdataSelect, strings.Join(options.Select, ",")) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryEntities returns the entities in the table. +// You can use query options defined by the OData Protocol specification. 
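+// A filter sketch (MinimalMetadata is assumed from this package's +// MetadataLevel values): +// +// opts := &QueryOptions{Top: 5, Filter: "PartitionKey eq 'p1'"} +// res, err := table.QueryEntities(30, MinimalMetadata, opts)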
+// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + query, headers := options.getParameters() + query = addTimeout(query, timeout) + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) + return t.queryEntities(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryEntities or NextResults operation. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { + if eqr == nil { + return nil, errNilPreviousResult + } + if eqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) +} + +// SetPermissions sets up table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL +func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, + } + + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + body, length, err := generateTableACLPayload(tap) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, tap := range policies { + permission := generateTablePermissions(&tap) + signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +// GetPermissions gets the table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl +func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, + } + + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return updateTableAccessPolicy(ap), nil +} + +func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { + headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) + if ml != EmptyPayload { + 
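// the Accept header selects the OData metadata level; EmptyPayload + // was rejected by the exported callers before this point. +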
headers[headerAccept] = string(ml) + } + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, err + } + var entities EntityQueryResult + err = json.Unmarshal(data, &entities) + if err != nil { + return nil, err + } + + for i := range entities.Entities { + entities.Entities[i].Table = t + } + entities.table = t + + contToken := extractContinuationTokenFromHeaders(resp.headers) + if contToken == nil { + entities.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) + v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) + entities.NextLink = &newURI + entities.ml = ml + } + + return &entities, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { + ct := continuationToken{ + NextPartitionKey: h.Get(headerNextPartitionKey), + NextRowKey: h.Get(headerNextRowKey), + } + + if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + return &ct + } + return nil +} + +func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { + taps := []TableAccessPolicy{} + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + tap := TableAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") + tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") + tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + taps = append(taps, tap) + } + return taps +} + +func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { + // generate the permissions string (raud). + // still want the end user API to have bool flags. + permissions = "" + + if tap.CanRead { + permissions += "r" + } + + if tap.CanAppend { + permissions += "a" + } + + if tap.CanUpdate { + permissions += "u" + } + + if tap.CanDelete { + permissions += "d" + } + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go new file mode 100644 index 00000000000..3f882417c65 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -0,0 +1,316 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "sort" + "strings" + + "github.com/satori/uuid" +) + +// Operation type. Insert, Delete, Replace etc. +type Operation int + +// consts for batch operations. +const ( + InsertOp = Operation(1) + DeleteOp = Operation(2) + ReplaceOp = Operation(3) + MergeOp = Operation(4) + InsertOrReplaceOp = Operation(5) + InsertOrMergeOp = Operation(6) +) + +// BatchEntity is used for tracking the entities to operate on and +// whether operations (replace/merge etc.) should be forced. +// It wraps a regular Entity with additional data specific to the entity. +type BatchEntity struct { + *Entity + Force bool + Op Operation +} + +// TableBatch stores all the entities that will be operated on during a batch process. +// Entities can be inserted, replaced or deleted. +type TableBatch struct { + BatchEntitySlice []BatchEntity + + // reference to table we're operating on. + Table *Table +} + +// defaultChangesetHeaders for changeSets +var defaultChangesetHeaders = map[string]string{ + "Accept": "application/json;odata=minimalmetadata", + "Content-Type": "application/json", + "Prefer": "return-no-content", +} + +// NewBatch returns a new TableBatch for populating. +func (t *Table) NewBatch() *TableBatch { + return &TableBatch{ + Table: t, + } +} + +// InsertEntity adds an entity in preparation for a batch insert. +func (t *TableBatch) InsertEntity(entity *Entity) { + be := BatchEntity{Entity: entity, Force: false, Op: InsertOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace. +func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) { + be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag +func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) { + t.InsertOrReplaceEntity(entity, true) +} + +// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge. +func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) { + be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag +func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) { + t.InsertOrMergeEntity(entity, true) +} + +// ReplaceEntity adds an entity in preparation for a batch replace. +func (t *TableBatch) ReplaceEntity(entity *Entity) { + be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// DeleteEntity adds an entity in preparation for a batch delete +func (t *TableBatch) DeleteEntity(entity *Entity, force bool) { + be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// DeleteEntityByForce adds an entity in preparation for a batch delete.
Forces regardless of ETag +func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) { + t.DeleteEntity(entity, true) +} + +// MergeEntity adds an entity in preparation for a batch merge +func (t *TableBatch) MergeEntity(entity *Entity) { + be := BatchEntity{Entity: entity, Force: false, Op: MergeOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// ExecuteBatch executes many table operations in one request to Azure. +// The operations can be combinations of Insert, Delete, Replace and Merge. +// It creates the inner changeset body (the various operations: Insert, Delete, etc.) and then the outer request packet that encompasses +// the changesets. +// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions +func (t *TableBatch) ExecuteBatch() error { + changesetBoundary := fmt.Sprintf("changeset_%s", uuid.NewV1()) + uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil) + changesetBody, err := t.generateChangesetBody(changesetBoundary) + if err != nil { + return err + } + + boundary := fmt.Sprintf("batch_%s", uuid.NewV1()) + body, err := generateBody(changesetBody, changesetBoundary, boundary) + if err != nil { + return err + } + + headers := t.Table.tsc.client.getStandardHeaders() + headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary) + + resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth) + if err != nil { + return err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { + + // check which operation in the batch failed. + operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value) + requestID, date, version := getDebugHeaders(resp.headers) + return AzureStorageServiceError{ + StatusCode: resp.statusCode, + Code: resp.odata.Err.Code, + RequestID: requestID, + Date: date, + APIVersion: version, + Message: operationFailedMessage, + } + } + + return nil +} + +// getFailedOperation parses the original Azure error string and determines which operation failed +// and generates an appropriate message. +func (t *TableBatch) getFailedOperation(errorMessage string) string { + // errorMessage consists of "number:string"; we just need the number. + sp := strings.Split(errorMessage, ":") + if len(sp) > 1 { + msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage) + return msg + } + + // can't parse the message, just return the original message to the client + return errorMessage +} + +// generateBody generates the complete body for the batch request. +func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) { + + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + writer.SetBoundary(boundary) + h := make(textproto.MIMEHeader) + h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary)) + batchWriter, err := writer.CreatePart(h) + if err != nil { + return nil, err + } + batchWriter.Write(changeSetBody.Bytes()) + writer.Close() + return body, nil +} + +// generateChangesetBody generates the individual changesets for the various operations within the batch request. +// There is a changeset for Insert, Delete, Merge etc.
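+// Per Azure's entity group transaction rules, all entities in one batch +// must share a partition key and the changeset is applied atomically.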
+func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) { + + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + writer.SetBoundary(changesetBoundary) + + for _, be := range t.BatchEntitySlice { + t.generateEntitySubset(&be, writer) + } + + writer.Close() + return body, nil +} + +// generateVerb generates the HTTP request VERB required for each changeset. +func generateVerb(op Operation) (string, error) { + switch op { + case InsertOp: + return http.MethodPost, nil + case DeleteOp: + return http.MethodDelete, nil + case ReplaceOp, InsertOrReplaceOp: + return http.MethodPut, nil + case MergeOp, InsertOrMergeOp: + return "MERGE", nil + default: + return "", errors.New("Unable to detect operation") + } +} + +// generateQueryPath generates the query path for within the changesets +// For inserts it will just be a table query path (table name) +// but for other operations (modifying an existing entity) then +// the partition/row keys need to be generated. +func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { + if op == InsertOp { + return entity.Table.buildPath() + } + return entity.buildPath() +} + +// generateGenericOperationHeaders generates common headers for a given operation. +func generateGenericOperationHeaders(be *BatchEntity) map[string]string { + retval := map[string]string{} + + for k, v := range defaultChangesetHeaders { + retval[k] = v + } + + if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { + if be.Force || be.Entity.OdataEtag == "" { + retval["If-Match"] = "*" + } else { + retval["If-Match"] = be.Entity.OdataEtag + } + } + + return retval +} + +// generateEntitySubset generates body payload for particular batch entity +func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error { + + h := make(textproto.MIMEHeader) + h.Set(headerContentType, "application/http") + h.Set(headerContentTransferEncoding, "binary") + + verb, err := generateVerb(batchEntity.Op) + if err != nil { + return err + } + + genericOpHeadersMap := generateGenericOperationHeaders(batchEntity) + queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity) + uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil) + + operationWriter, err := writer.CreatePart(h) + if err != nil { + return err + } + + urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri) + operationWriter.Write([]byte(urlAndVerb)) + writeHeaders(genericOpHeadersMap, &operationWriter) + operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body. + + // delete operation doesn't need a body. 
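+ // every other operation serializes the entity to JSON for the part body.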
+ if batchEntity.Op != DeleteOp { + //var e Entity = batchEntity.Entity + body, err := json.Marshal(batchEntity.Entity) + if err != nil { + return err + } + operationWriter.Write(body) + } + + return nil +} + +func writeHeaders(h map[string]string, writer *io.Writer) { + // This way it is guaranteed the headers will be written in a sorted order + var keys []string + for k := range h { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go new file mode 100644 index 00000000000..456bee7733a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -0,0 +1,204 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +const ( + headerAccept = "Accept" + headerEtag = "Etag" + headerPrefer = "Prefer" + headerXmsContinuation = "x-ms-Continuation-NextTableName" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client + auth authentication +} + +// TableOptions includes options for some table operations +type TableOptions struct { + RequestID string +} + +func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { + if options != nil { + h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) + } + return h +} + +// QueryNextLink includes information for getting the next page of +// results in query operations +type QueryNextLink struct { + NextLink *string + ml MetadataLevel +} + +// GetServiceProperties gets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties +func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return t.client.getServiceProperties(tableServiceName, t.auth) +} + +// SetServiceProperties sets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties +func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { + return t.client.setServiceProperties(props, tableServiceName, t.auth) +} + +// GetTableReference returns a Table object for the specified table name. 
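+// No network call is made; the result is a purely local reference: +// +// table := tsc.GetTableReference("mytable")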
+func (t *TableServiceClient) GetTableReference(name string) *Table { + return &Table{ + tsc: t, + Name: name, + } +} + +// QueryTablesOptions includes options for some table operations +type QueryTablesOptions struct { + Top uint + Filter string + RequestID string +} + +func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryTables returns the tables in the storage account. +// You can use query options defined by the OData Protocol specification. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables +func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { + query, headers := options.getParameters() + uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) + return t.queryTables(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryTables or a NextResults operation. +// +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { + if tqr == nil { + return nil, errNilPreviousResult + } + if tqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + + return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) +} + +// TableQueryResult contains the response from +// QueryTables and QueryTablesNextResults functions. 
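+// A non-nil NextLink means more pages are available via NextResults.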
+type TableQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Tables []Table `json:"value"` + QueryNextLink + tsc *TableServiceClient +} + +func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + headers = mergeHeaders(headers, t.client.getStandardHeaders()) + headers[headerAccept] = string(ml) + + resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, err + } + var out TableQueryResult + err = json.Unmarshal(respBody, &out) + if err != nil { + return nil, err + } + + for i := range out.Tables { + out.Tables[i].tsc = t + } + out.tsc = t + + nextLink := resp.headers.Get(http.CanonicalHeaderKey(headerXmsContinuation)) + if nextLink == "" { + out.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextTableQueryParameter, nextLink) + newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) + out.NextLink = &newURI + out.ml = ml + } + + return &out, nil +} + +func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { + h[headerContentType] = "application/json" + h[headerContentLength] = fmt.Sprintf("%v", length) + h[headerAcceptCharset] = "UTF-8" + return h +} + +func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { + if ml != EmptyPayload { + h[headerPrefer] = "return-content" + h[headerAccept] = string(ml) + } else { + h[headerPrefer] = "return-no-content" + // From API version 2015-12-11 onwards, Accept header is required + h[headerAccept] = string(NoMetadata) + } + return h +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go new file mode 100644 index 00000000000..7734b8f886f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -0,0 +1,235 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+    "bytes"
+    "crypto/hmac"
+    "crypto/sha256"
+    "encoding/base64"
+    "encoding/xml"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "reflect"
+    "strconv"
+    "strings"
+    "time"
+)
+
+var (
+    fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
+    accountSASOptions = AccountSASTokenOptions{
+        Services: Services{
+            Blob: true,
+        },
+        ResourceTypes: ResourceTypes{
+            Service:   true,
+            Container: true,
+            Object:    true,
+        },
+        Permissions: Permissions{
+            Read:    true,
+            Write:   true,
+            Delete:  true,
+            List:    true,
+            Add:     true,
+            Create:  true,
+            Update:  true,
+            Process: true,
+        },
+        Expiry:   fixedTime,
+        UseHTTPS: true,
+    }
+)
+
+func (c Client) computeHmac256(message string) string {
+    h := hmac.New(sha256.New, c.accountKey)
+    h.Write([]byte(message))
+    return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func currentTimeRfc1123Formatted() string {
+    return timeRfc1123Formatted(time.Now().UTC())
+}
+
+func timeRfc1123Formatted(t time.Time) string {
+    return t.Format(http.TimeFormat)
+}
+
+func mergeParams(v1, v2 url.Values) url.Values {
+    out := url.Values{}
+    for k, v := range v1 {
+        out[k] = v
+    }
+    for k, v := range v2 {
+        vals, ok := out[k]
+        if ok {
+            vals = append(vals, v...)
+            out[k] = vals
+        } else {
+            out[k] = v
+        }
+    }
+    return out
+}
+
+func prepareBlockListRequest(blocks []Block) string {
+    s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
+    for _, v := range blocks {
+        s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
+    }
+    s += `</BlockList>`
+    return s
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+    data, err := ioutil.ReadAll(body)
+    if err != nil {
+        return err
+    }
+    return xml.Unmarshal(data, v)
+}
+
+func xmlMarshal(v interface{}) (io.Reader, int, error) {
+    b, err := xml.Marshal(v)
+    if err != nil {
+        return nil, 0, err
+    }
+    return bytes.NewReader(b), len(b), nil
+}
+
+func headersFromStruct(v interface{}) map[string]string {
+    headers := make(map[string]string)
+    value := reflect.ValueOf(v)
+    for i := 0; i < value.NumField(); i++ {
+        key := value.Type().Field(i).Tag.Get("header")
+        if key != "" {
+            reflectedValue := reflect.Indirect(value.Field(i))
+            var val string
+            if reflectedValue.IsValid() {
+                switch reflectedValue.Type() {
+                case reflect.TypeOf(fixedTime):
+                    val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
+                case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
+                    val = strconv.FormatUint(reflectedValue.Uint(), 10)
+                case reflect.TypeOf(int(0)):
+                    val = strconv.FormatInt(reflectedValue.Int(), 10)
+                default:
+                    val = reflectedValue.String()
+                }
+            }
+            if val != "" {
+                headers[key] = val
+            }
+        }
+    }
+    return headers
+}
+
+// merges extraHeaders into headers and returns headers
+func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
+    for k, v := range extraHeaders {
+        headers[k] = v
+    }
+    return headers
+}
+
+func addToHeaders(h map[string]string, key, value string) map[string]string {
+    if value != "" {
+        h[key] = value
+    }
+    return h
+}
+
+func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
+    if value != nil {
+        h = addToHeaders(h, key, timeRfc1123Formatted(*value))
+    }
+    return h
+}
+
+func addTimeout(params url.Values, timeout uint) url.Values {
+    if timeout > 0 {
+        params.Add("timeout", fmt.Sprintf("%v", timeout))
+    }
+    return params
+}
+
+func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
+    if snapshot != nil {
+        params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z"))
+    }
+    return params
+}
+
+func getTimeFromHeaders(h http.Header, 
key string) (*time.Time, error) { + var out time.Time + var err error + outStr := h.Get(key) + if outStr != "" { + out, err = time.Parse(time.RFC1123, outStr) + if err != nil { + return nil, err + } + } + return &out, nil +} + +// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling +type TimeRFC1123 time.Time + +// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. +func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var value string + d.DecodeElement(&value, &start) + parse, err := time.Parse(time.RFC1123, value) + if err != nil { + return err + } + *t = TimeRFC1123(parse) + return nil +} + +// returns a map of custom metadata values from the specified HTTP header +func getMetadataFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + + if len(metadata) == 0 { + return nil + } + + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go new file mode 100644 index 00000000000..67ff6ca03fe --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go @@ -0,0 +1,26 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "io" + "net/http" +) + +func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { + req.ContentLength = lr.N +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go new file mode 100644 index 00000000000..eada102c0cf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go @@ -0,0 +1,32 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "io" + "io/ioutil" + "net/http" +) + +func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { + req.ContentLength = lr.N + snapshot := *lr + req.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go new file mode 100644 index 00000000000..1cd3e03d12a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -0,0 +1,19 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +var ( + sdkVersion = "10.0.2" +) diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 00000000000..a17cf98c621
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,253 @@
+# Azure Active Directory library for Go
+
+This project provides a standalone Azure Active Directory library for Go. The code was extracted
+from the [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for
+[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go).
+
+
+## Installation
+
+```
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or by using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+    // This is called after the token is acquired
+    return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace `TENANT_ID` with your tenant ID.
+* Replace `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+    oauthConfig,
+    applicationID,
+    applicationSecret,
+    resource,
+    callbacks...)
+if err != nil {
+    return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+* Replace `APPLICATION_SECRET` with the `password` value from the previous section.
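+
+Once acquired, the `ServicePrincipalToken` can be attached to outgoing requests.
+A minimal sketch, assuming the `spt` from the snippet above and the `autorest`
+package (whose `BearerAuthorizer` accepts any `adal.OAuthTokenProvider`, as shown
+later in this change); the base URL is illustrative:
+
+```Go
+authorizer := autorest.NewBearerAuthorizer(spt)
+
+req, err := autorest.Prepare(&http.Request{},
+    autorest.AsGet(),
+    autorest.WithBaseURL("https://management.azure.com/"),
+    authorizer.WithAuthorization()) // refreshes the token if needed, then sets the Authorization header
+```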
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+    return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+    return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+    oauthConfig,
+    applicationID,
+    certificate,
+    rsaPrivateKey,
+    resource,
+    callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+    oauthClient,
+    oauthConfig,
+    applicationID,
+    resource)
+if err != nil {
+    return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+    return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+    oauthConfig,
+    applicationID,
+    resource,
+    *token,
+    callbacks...)
+
+if err == nil {
+    token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFC application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oath token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 00000000000..49e9214d598
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,65 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 00000000000..b38f4c24589 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,242 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*
+	This file is largely based on rjw57/oauth2device's code, with the following differences:
+	   * scope -> resource, and only allow a single one
+	   * receive "Message" in the DeviceCode struct and show it to users as the prompt
+	   * azure-xplat-cli has the following behavior that this emulates:
+	     - does not send client_secret during the token exchange
+	     - sends resource again in the token exchange request
+*/
+
+import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strings"
+    "time"
+)
+
+const (
+    logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+    // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+    ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+    // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+    ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+    // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+    ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+    // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+    ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+    // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+    ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+    // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+    ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+    // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+    ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+    errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+    errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+    errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+    errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+    errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint
+// It contains information to instruct the user to complete the auth flow
+type DeviceCode struct {
+    DeviceCode      *string `json:"device_code,omitempty"`
+    UserCode        *string `json:"user_code,omitempty"`
+    VerificationURL *string `json:"verification_url,omitempty"`
+    ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+    Interval        *int64  `json:"interval,string,omitempty"`
+
+    Message *string `json:"message"` // Azure specific
+    // The following fields are stored when the flow is initiated and reused
+    // when exchanging the code for a token.
+    Resource    string
+    OAuthConfig OAuthConfig
+    ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+    Error            *string `json:"error,omitempty"`
+    ErrorCodes       []int   `json:"error_codes,omitempty"`
+    ErrorDescription *string `json:"error_description,omitempty"`
+    Timestamp        *string `json:"timestamp,omitempty"`
+    TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint.
+// 
It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + 
return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go new file mode 100644 index 00000000000..5e02d52ac27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -0,0 +1,20 @@ +// +build !windows + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go new file mode 100644 index 00000000000..261b568829c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+    "os"
+    "strings"
+)
+
+// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
+var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/")
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
new file mode 100644
index 00000000000..9e15f2751f2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -0,0 +1,73 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+    file, err := os.Open(path)
+    if err != nil {
+        return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+    }
+    defer file.Close()
+
+    var token Token
+
+    dec := json.NewDecoder(file)
+    if err = dec.Decode(&token); err != nil {
+        return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+    }
+    return &token, nil
+}
+
+// SaveToken persists an OAuth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+    dir := filepath.Dir(path)
+    err := os.MkdirAll(dir, os.ModePerm)
+    if err != nil {
+        return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+    }
+
+    newFile, err := ioutil.TempFile(dir, "token")
+    if err != nil {
+        return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+    }
+    tempPath := newFile.Name()
+
+    if err := json.NewEncoder(newFile).Encode(token); err != nil {
+        return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+    }
+    if err := newFile.Close(); err != nil {
+        return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+    }
+
+    // Atomic replace to avoid multi-writer file corruptions
+    if err := os.Rename(tempPath, path); err != nil {
+        return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+    }
+    if err := os.Chmod(path, mode); err != nil {
+        return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+    }
+    return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 00000000000..0e5ad14d396
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,60 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "net/http"
+)
+
+const (
+    contentType      = "Content-Type"
+    mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+    Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+    return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along and then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+    return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+    for _, decorate := range decorators {
+        s = decorate(s)
+    }
+    return s
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 00000000000..67dd97a18c1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,427 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "crypto/rand"
+    "crypto/rsa"
+    "crypto/sha1"
+    "crypto/x509"
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+    "time"
+
+    "github.com/Azure/go-autorest/autorest/date"
+    "github.com/dgrijalva/jwt-go"
+)
+
+const (
+    defaultRefresh = 5 * time.Minute
+
+    // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+    OAuthGrantTypeDeviceCode = "device_code"
+
+    // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+    OAuthGrantTypeClientCredentials = "client_credentials"
+
+    // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+    OAuthGrantTypeRefreshToken = "refresh_token"
+
+    // metadataHeader is the header required by MSI extension
+    metadataHeader = "Metadata"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+    OAuthToken() string
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+    Refresh() error
+    RefreshExchange(resource string) error
+    EnsureFresh() error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+type Token struct {
+    AccessToken  string `json:"access_token"`
+    RefreshToken string `json:"refresh_token"`
+
+    ExpiresIn string `json:"expires_in"`
+    ExpiresOn string `json:"expires_on"`
+    NotBefore string `json:"not_before"`
+
+    Resource string `json:"resource"`
+    Type     string `json:"token_type"`
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+    s, err := strconv.Atoi(t.ExpiresOn)
+    if err != nil {
+        s = -3600
+    }
+
+    expiration := date.NewUnixTimeFromSeconds(float64(s))
+
+    return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+    return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+    return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+    return t.AccessToken
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. It is used by manually created tokens.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+    return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an OAuth token.
+type ServicePrincipalSecret interface {
+    SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. 
+type ServicePrincipalTokenSecret struct { + ClientSecret string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.oauthConfig.TokenEndpoint.String(), + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + Token + + secret ServicePrincipalSecret + oauthConfig OAuthConfig + clientID string + resource string + autoRefresh bool + refreshWithin time.Duration + sender Sender + + refreshCallbacks []TokenRefreshCallback +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + oauthConfig: oauthConfig, + secret: secret, + clientID: id, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return getMSIVMEndpoint(msiPath) +} + +func getMSIVMEndpoint(path string) (string, error) { + // Read MSI settings + bytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + msiSettings := struct { + URL string `json:"url"` + }{} + err = json.Unmarshal(bytes, &msiSettings) + if err != nil { + return "", err + } + + return msiSettings.URL, nil +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") + if err != nil { + return nil, err + } + + spt := &ServicePrincipalToken{ + oauthConfig: *oauthConfig, + secret: &ServicePrincipalMSISecret{}, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + + return spt, nil +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. 
+func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.refreshInternal(spt.resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.refreshInternal(resource) +} + +func (spt *ServicePrincipalToken) refreshInternal(resource string) error { + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("resource", resource) + + if spt.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.RefreshToken) + } else { + v.Set("grant_type", OAuthGrantTypeClientCredentials) + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + req.Header.Set(metadataHeader, "true") + } + resp, err := spt.sender.Do(req) + if err != nil { + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) + } + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) + } + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. 
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
new file mode 100644
index 00000000000..71e3ced2d6a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -0,0 +1,181 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+	bearerChallengeHeader = "Www-Authenticate"
+	bearer                = "Bearer"
+	tenantID              = "tenantID"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// BearerAuthorizer implements the bearer authorization.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator {
+	return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			refresher, ok := ba.tokenProvider.(adal.Refresher)
+			if ok {
+				err := refresher.EnsureFresh()
+				if err != nil {
+					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil,
+						"Failed to refresh the Token for request to %s", r.URL)
+				}
+			}
+			return (ba.withBearerAuthorization()(p)).Prepare(r)
+		})
+	}
+}
+
+// BearerAuthorizerCallbackFunc is the authentication callback signature.
+type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
+
+// BearerAuthorizerCallback implements bearer authorization via a callback.
+type BearerAuthorizerCallback struct {
+	sender   Sender
+	callback BearerAuthorizerCallbackFunc
+}
+
+// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
+// is invoked when the HTTP request is submitted.
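+//
+// A minimal sketch of wiring the callback (tokenForTenant is a hypothetical helper that
+// builds an *adal.ServicePrincipalToken for the challenged tenant and resource):
+//
+//	cb := NewBearerAuthorizerCallback(nil, func(tenantID, resource string) (*BearerAuthorizer, error) {
+//		spt, err := tokenForTenant(tenantID, resource)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return NewBearerAuthorizer(spt), nil
+//	})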
+func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return ba.WithAuthorization()(p).Prepare(r) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 00000000000..37b907c77f5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,129 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. 
A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. 
If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000000..ffbc8da28e5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,316 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + if err != nil { + return resp, err + } + pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} + if !autorest.ResponseHasStatusCode(resp, pollingCodes...) 
{
+				return resp, nil
+			}
+
+			ps := pollingState{}
+			for err == nil {
+				err = updatePollingState(resp, &ps)
+				if err != nil {
+					break
+				}
+				if ps.hasTerminated() {
+					if !ps.hasSucceeded() {
+						err = ps
+					}
+					break
+				}
+
+				r, err = newPollingRequest(resp, ps)
+				if err != nil {
+					return resp, err
+				}
+
+				delay = autorest.GetRetryAfter(resp, delay)
+				resp, err = autorest.SendWithSender(s, r,
+					autorest.AfterDelay(delay))
+			}
+
+			return resp, err
+		})
+	}
+}
+
+func getAsyncOperation(resp *http.Response) string {
+	return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+}
+
+func hasSucceeded(state string) bool {
+	return state == operationSucceeded
+}
+
+func hasTerminated(state string) bool {
+	switch state {
+	case operationCanceled, operationFailed, operationSucceeded:
+		return true
+	default:
+		return false
+	}
+}
+
+func hasFailed(state string) bool {
+	return state == operationFailed
+}
+
+type provisioningTracker interface {
+	state() string
+	hasSucceeded() bool
+	hasTerminated() bool
+}
+
+type operationResource struct {
+	// Note:
+	// The specification states services should return the "id" field. However some return it as
+	// "operationId".
+	ID              string                 `json:"id"`
+	OperationID     string                 `json:"operationId"`
+	Name            string                 `json:"name"`
+	Status          string                 `json:"status"`
+	Properties      map[string]interface{} `json:"properties"`
+	OperationError  ServiceError           `json:"error"`
+	StartTime       date.Time              `json:"startTime"`
+	EndTime         date.Time              `json:"endTime"`
+	PercentComplete float64                `json:"percentComplete"`
+}
+
+func (or operationResource) state() string {
+	return or.Status
+}
+
+func (or operationResource) hasSucceeded() bool {
+	return hasSucceeded(or.state())
+}
+
+func (or operationResource) hasTerminated() bool {
+	return hasTerminated(or.state())
+}
+
+type provisioningProperties struct {
+	ProvisioningState string `json:"provisioningState"`
+}
+
+type provisioningStatus struct {
+	Properties        provisioningProperties `json:"properties,omitempty"`
+	ProvisioningError ServiceError           `json:"error,omitempty"`
+}
+
+func (ps provisioningStatus) state() string {
+	return ps.Properties.ProvisioningState
+}
+
+func (ps provisioningStatus) hasSucceeded() bool {
+	return hasSucceeded(ps.state())
+}
+
+func (ps provisioningStatus) hasTerminated() bool {
+	return hasTerminated(ps.state())
+}
+
+func (ps provisioningStatus) hasProvisioningError() bool {
+	return ps.ProvisioningError != ServiceError{}
+}
+
+type pollingResponseFormat string
+
+const (
+	usesOperationResponse  pollingResponseFormat = "OperationResponse"
+	usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
+	formatIsUnknown        pollingResponseFormat = ""
+)
+
+type pollingState struct {
+	responseFormat pollingResponseFormat
+	uri            string
+	state          string
+	code           string
+	message        string
+}
+
+func (ps pollingState) hasSucceeded() bool {
+	return hasSucceeded(ps.state)
+}
+
+func (ps pollingState) hasTerminated() bool {
+	return hasTerminated(ps.state)
+}
+
+func (ps pollingState) hasFailed() bool {
+	return hasFailed(ps.state)
+}
+
+func (ps pollingState) Error() string {
+	return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
+}
+
+// updatePollingState maps the operation status -- retrieved from either a provisioningState
+// field, the status field of an OperationResource, or inferred from the HTTP status code --
+// into well-known states. Since the process begins from the initial request, the state
+// always comes from either the provisioningState returned or is inferred from the HTTP
+// status code. Subsequent requests will read an Azure OperationResource object if the
+// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
+// the expected response format.
+func updatePollingState(resp *http.Response, ps *pollingState) error {
+	// Determine the response shape
+	// -- The first response will always be a provisioningStatus response; only the polling requests,
+	//    depending on the header returned, may be something otherwise.
+	var pt provisioningTracker
+	if ps.responseFormat == usesOperationResponse {
+		pt = &operationResource{}
+	} else {
+		pt = &provisioningStatus{}
+	}
+
+	// If this is the first request (that is, the polling response shape is unknown), determine how
+	// to poll and what to expect
+	if ps.responseFormat == formatIsUnknown {
+		req := resp.Request
+		if req == nil {
+			return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
+		}
+
+		// Prefer the Azure-AsyncOperation header
+		ps.uri = getAsyncOperation(resp)
+		if ps.uri != "" {
+			ps.responseFormat = usesOperationResponse
+		} else {
+			ps.responseFormat = usesProvisioningStatus
+		}
+
+		// Else, use the Location header
+		if ps.uri == "" {
+			ps.uri = autorest.GetLocation(resp)
+		}
+
+		// Lastly, for requests against an existing resource, use the last request URI
+		if ps.uri == "" {
+			m := strings.ToUpper(req.Method)
+			if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet {
+				ps.uri = req.URL.String()
+			}
+		}
+	}
+
+	// Read and interpret the response (saving the Body in case no polling is necessary)
+	b := &bytes.Buffer{}
+	err := autorest.Respond(resp,
+		autorest.ByCopying(b),
+		autorest.ByUnmarshallingJSON(pt),
+		autorest.ByClosing())
+	resp.Body = ioutil.NopCloser(b)
+	if err != nil {
+		return err
+	}
+
+	// Interpret the results
+	// -- Terminal states apply regardless
+	// -- Unknown states are per-service in-progress states
+	// -- Otherwise, infer state from HTTP status code
+	if pt.hasTerminated() {
+		ps.state = pt.state()
+	} else if pt.state() != "" {
+		ps.state = operationInProgress
+	} else {
+		switch resp.StatusCode {
+		case http.StatusAccepted:
+			ps.state = operationInProgress
+
+		case http.StatusNoContent, http.StatusCreated, http.StatusOK:
+			ps.state = operationSucceeded
+
+		default:
+			ps.state = operationFailed
+		}
+	}
+
+	if ps.state == operationInProgress && ps.uri == "" {
+		return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
+	}
+
+	// For failed operation, check for error code and message in
+	// -- Operation resource
+	// -- Response
+	// -- Otherwise, Unknown
+	if ps.hasFailed() {
+		if ps.responseFormat == usesOperationResponse {
+			or := pt.(*operationResource)
+			ps.code = or.OperationError.Code
+			ps.message = or.OperationError.Message
+		} else {
+			p := pt.(*provisioningStatus)
+			if p.hasProvisioningError() {
+				ps.code = p.ProvisioningError.Code
+				ps.message = p.ProvisioningError.Message
+			} else {
+				ps.code = "Unknown"
+				ps.message = "None"
+			}
+		}
+	}
+	return nil
+}
+
+func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
+	req := resp.Request
+	if req == nil {
+		return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
+	}
+
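+	// Poll with a GET against the polling URI, reusing the original request's cancel channel.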
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, + autorest.AsGet(), + autorest.WithBaseURL(ps.uri)) + if err != nil { + return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + } + + return reqPoll, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 00000000000..fa18356476b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,200 @@ +/* +Package azure provides Azure-specific implementations used with AutoRest. + +See the included examples for more detail. +*/ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Details *[]interface{} `json:"details"` +} + +func (se ServiceError) Error() string { + if se.Details != nil { + d, err := json.Marshal(*(se.Details)) + if err != nil { + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + } + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + } + return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. 
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. 
+ // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + } + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000000..30c4351a576 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,144 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "strings" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
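+// Use EnvironmentFromName (defined below) to resolve one of the predefined environments
+// by its common name, e.g. "AzurePublicCloud".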
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: 
"https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified +func EnvironmentFromName(name string) (Environment, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + return env, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000000..6036d069a12 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,202 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries.
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := autorest.NewRetriableRequest(r)
+			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+
+				resp, err = autorest.SendWithSender(s, rr.Request(),
+					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+				)
+				if err != nil {
+					return resp, err
+				}
+
+				if resp.StatusCode != http.StatusConflict {
+					return resp, err
+				}
+				var re RequestError
+				err = autorest.Respond(
+					resp,
+					autorest.ByUnmarshallingJSON(&re),
+				)
+				if err != nil {
+					return resp, err
+				}
+
+				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+					err = register(client, r, re)
+					if err != nil {
+						return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err)
+					}
+				}
+			}
+			return resp, errors.New("failed request and resource provider registration")
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil {
+		if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 {
+			detail := (*re.ServiceError.Details)[0].(map[string]interface{})
+			return detail["target"].(string), nil
+		}
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req.Cancel = originalReq.Cancel
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+	err = autorest.Respond(
+		resp,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&provider),
+		autorest.ByClosing(),
+	)
+	if err != nil {
+		return err
+	}
+
+	// poll for registered provisioning state
+	now := time.Now()
+	for err == nil && time.Since(now) < client.PollingDuration {
+		// taken from the resources SDK
+		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+		preparer := autorest.CreatePreparer(
+			autorest.AsGet(),
+			autorest.WithBaseURL(newURL.String()),
+			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+			autorest.WithQueryParameters(queryParameters),
+		)
+		req, err = preparer.Prepare(&http.Request{})
+		if err != nil {
+			return err
+		}
+		req.Cancel = originalReq.Cancel
+
+		resp, err := autorest.SendWithSender(client.Sender, req,
+			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+		)
+		if err != nil {
+			return err
+		}
+
+		err = autorest.Respond(
+			resp,
+			WithErrorUnlessStatusCode(http.StatusOK),
+			autorest.ByUnmarshallingJSON(&provider),
+			autorest.ByClosing(),
+		)
+		if err != nil {
+			return err
+		}
+
+		if provider.RegistrationState != nil &&
+			*provider.RegistrationState == "Registered" {
+			break
+		}
+
+		delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel)
+		if !delayed {
+			autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel)
+		}
+	}
+	if !(time.Since(now) < client.PollingDuration) {
+		return errors.New("polling for resource provider registration has exceeded the polling duration")
+	}
+	return err
+}
+
+func getSubscription(path string) string {
+	parts := strings.Split(path, "/")
+	for i, v := range parts {
+		if v == "subscriptions" && (i+1) < len(parts) {
+			return parts[i+1]
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
new file mode 100644
index 00000000000..ce7a605f896
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -0,0 +1,251 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/http/cookiejar"
+	"runtime"
+	"time"
+)
+
+const (
+	// DefaultPollingDelay is a reasonable delay between polling requests.
+	DefaultPollingDelay = 60 * time.Second
+
+	// DefaultPollingDuration is a reasonable total polling duration.
+	DefaultPollingDuration = 15 * time.Minute
+
+	// DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+	DefaultRetryAttempts = 3
+)
+
+var (
+	// defaultUserAgent builds a string containing the Go version, system architecture and OS,
+	// and the go-autorest version.
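+	// e.g. "Go/go1.9.1 (amd64-linux) go-autorest/v9.0.0" (values are illustrative)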
+	defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
+		runtime.Version(),
+		runtime.GOARCH,
+		runtime.GOOS,
+		Version(),
+	)
+
+	// StatusCodesForRetry are a defined group of status codes for which the client will retry.
+	StatusCodesForRetry = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			var body, b bytes.Buffer
+			defer resp.Body.Close()
+			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+			if err := resp.Write(&b); err != nil {
+				return fmt.Errorf("Failed to write response: %v", err)
+			}
+
+			li.Logger.Printf(responseFormat, b.String())
+
+			resp.Body = ioutil.NopCloser(&body)
+			return r.Respond(resp)
+		})
+	}
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector.
Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+	Authorizer        Authorizer
+	Sender            Sender
+	RequestInspector  PrepareDecorator
+	ResponseInspector RespondDecorator
+
+	// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
+	PollingDelay time.Duration
+
+	// PollingDuration sets the maximum polling time after which an error is returned.
+	PollingDuration time.Duration
+
+	// RetryAttempts sets the default number of retry attempts for client.
+	RetryAttempts int
+
+	// RetryDuration sets the delay duration for retries.
+	RetryDuration time.Duration
+
+	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+	// through the Do method.
+	UserAgent string
+
+	Jar http.CookieJar
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   30 * time.Second,
+		UserAgent:       defaultUserAgent,
+	}
+	c.Sender = c.sender()
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent.
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	r, err := Prepare(r,
+		c.WithInspection(),
+		c.WithAuthorization())
+	if err != nil {
+		return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+
+	resp, err := SendWithSender(c.sender(), r)
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender() Sender {
+	if c.Sender == nil {
+		j, _ := cookiejar.New(nil)
+		return &http.Client{Jar: j}
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+	if c.ResponseInspector == nil {
+		return ByIgnoring()
+	}
+	return c.ResponseInspector
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 00000000000..c4571065685
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types. And both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	fullDate     = "2006-01-02"
+	fullDateJSON = `"2006-01-02"`
+	dateFormat   = "%04d-%02d-%02d"
+	jsonFormat   = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+	time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+	return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+	d, err := time.Parse(format, date)
+	return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+	return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+	return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+	return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+	d.Time, err = time.Parse(fullDateJSON, string(data))
+	return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+	return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalText(data []byte) (err error) {
+	d.Time, err = time.Parse(fullDate, string(data))
+	return err
+}
+
+// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
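+//
+// A small illustrative example:
+//
+//	d, _ := ParseDate("2006-01-02")
+//	s := d.String() // s == "2006-01-02"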
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000000..b453fad0491 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
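+	// If marshaling fails (e.g. a year outside [0,9999]), return the empty string.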
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000000..48fb39ba9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 00000000000..7073959b2a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. 
+func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. +func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 00000000000..12addf0ebb4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 00000000000..f724f33327e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,98 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. 
For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + Response: resp, + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). +func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 00000000000..2290c401003 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,442 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. 
+func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. 
+func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if err == nil { + r.URL = u + } + } + return r, err + }) + } +} + +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. +func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. +func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters +// into the http.Request body. +func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. 
+func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). +func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = createQuery(v) + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 00000000000..a908a0adb70 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,250 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. 
Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. 
+func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. 
+func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. +func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 00000000000..fa11dbed79b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 00000000000..7143cc61b58 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 00000000000..ae15c6bf962 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,66 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy. note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 00000000000..7264c32f27d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,307 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "log" + "math" + "net/http" + "strconv" + "time" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +// Send sends, by means of the default http.Client, the passed http.Request, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// Send is a convenience method and not recommended for production. Advanced users should use +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). +// +// Send will not poll or retry requests. +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(&http.Client{}, r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. 
+func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Cancel) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequest(resp, r.Cancel) + + for err == nil && ResponseHasStatusCode(resp, codes...) 
{ + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err != nil || !ResponseHasStatusCode(resp, codes...) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Cancel) + if !delayed { + DelayForBackoff(backoff, attempt, r.Cancel) + } + } + return resp, err + }) + } +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. 
+func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + select { + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 00000000000..dfdc6efdff0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,192 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/url" + "reflect" + "sort" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. 
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using separator. +func String(v interface{}, sep ...string) string { + if len(sep) > 0 { + return ensureValueString(strings.Join(v.([]string), sep[0])) + } + return ensureValueString(v) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// This method is same as Encode() method of "net/url" go package, +// except it does not encode the query parameters because they +// already come encoded. It formats values map in query format (bar=foo&a=b). +func createQuery(v url.Values) string { + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(v) + } + } + return buf.String() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go new file mode 100644 index 00000000000..3fe62c93056 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go @@ -0,0 +1,395 @@ +/* +Package validation provides methods for validating parameter value using reflection. 
+*/ +package validation + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type Constraint struct { + + // Target field name for validation. + Target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + Name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + Rule interface{} + + // Chain Validations for struct type + Chain []Constraint +} + +// Validation stores parameter-wise validation. +type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := v.Rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must 
be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return 
createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", 
+ v.Target, v.Name, getInterfaceValue(x), err) +} + +// NewErrorWithValidationError appends package type and method name in +// validation error. +func NewErrorWithValidationError(err error, packageType, method string) error { + return fmt.Errorf("%s#%s: Invalid input: %v", packageType, method, err) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 00000000000..f588807dbb9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,49 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "strings" + "sync" +) + +const ( + major = 8 + minor = 0 + patch = 0 + tag = "" +) + +var once sync.Once +var version string + +// Version returns the semantic version (see http://semver.org). +func Version() string { + once.Do(func() { + semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) + verBuilder := bytes.NewBufferString(semver) + if tag != "" && tag != "-" { + updated := strings.TrimPrefix(tag, "-") + _, err := verBuilder.WriteString("-" + updated) + if err == nil { + verBuilder = bytes.NewBufferString(semver) + } + } + version = verBuilder.String() + }) + return version +} diff --git a/vendor/github.com/Unknwon/com/LICENSE b/vendor/github.com/Unknwon/com/LICENSE new file mode 100644 index 00000000000..8405e89a0b1 --- /dev/null +++ b/vendor/github.com/Unknwon/com/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/Unknwon/com/README.md b/vendor/github.com/Unknwon/com/README.md
new file mode 100644
index 00000000000..8d821abd65e
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/README.md
@@ -0,0 +1,20 @@
+Common Functions
+================
+
+[![Build Status](https://travis-ci.org/Unknwon/com.svg)](https://travis-ci.org/Unknwon/com) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/com)
+
+This is an open source project for commonly used functions for the Go programming language.
+
+This package needs **go 1.2** or later.
+
+Code Convention: based on [Go Code Convention](https://github.com/Unknwon/go-code-convention).
+
+## Contribute
+
+Contributions are welcome, but please check the following steps after you add functions and before you commit them:
+
+1. Make sure you wrote user-friendly comments for **all functions**.
+2. Make sure you wrote test cases covering all possible conditions for **all functions** in file `*_test.go`.
+3. Make sure you wrote benchmarks for **all functions** in file `*_test.go`.
+4. Make sure you wrote useful examples for **all functions** in file `example_test.go`.
+5. Make sure you ran `go test` and got **PASS**.
diff --git a/vendor/github.com/Unknwon/com/cmd.go b/vendor/github.com/Unknwon/com/cmd.go
new file mode 100644
index 00000000000..dc7086d8f71
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/cmd.go
@@ -0,0 +1,161 @@
+// +build go1.2
+
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package com is an open source project for commonly used functions for the Go programming language.
+package com
+
+import (
+    "bytes"
+    "fmt"
+    "os/exec"
+    "runtime"
+    "strings"
+)
+
+// ExecCmdDirBytes executes a system command in the given directory
+// and returns stdout and stderr as bytes, along with a possible error.
+func ExecCmdDirBytes(dir, cmdName string, args ...string) ([]byte, []byte, error) {
+    bufOut := new(bytes.Buffer)
+    bufErr := new(bytes.Buffer)
+
+    cmd := exec.Command(cmdName, args...)
+    cmd.Dir = dir
+    cmd.Stdout = bufOut
+    cmd.Stderr = bufErr
+
+    err := cmd.Run()
+    return bufOut.Bytes(), bufErr.Bytes(), err
+}
+
+// ExecCmdBytes executes a system command
+// and returns stdout and stderr as bytes, along with a possible error.
+func ExecCmdBytes(cmdName string, args ...string) ([]byte, []byte, error) {
+    return ExecCmdDirBytes("", cmdName, args...)
+}
+
+// ExecCmdDir executes a system command in the given directory
+// and returns stdout and stderr as strings, along with a possible error.
+func ExecCmdDir(dir, cmdName string, args ...string) (string, string, error) {
+    bufOut, bufErr, err := ExecCmdDirBytes(dir, cmdName, args...)
+    return string(bufOut), string(bufErr), err
+}
+
+// ExecCmd executes a system command
+// and returns stdout and stderr as strings, along with a possible error.
+func ExecCmd(cmdName string, args ...string) (string, string, error) {
+    return ExecCmdDir("", cmdName, args...)
+}
+
+// _________ .__ .____
+// \_ ___ \ ____ | | ___________ | | ____ ____
+// / \ \/ / _ \| | / _ \_ __ \ | | / _ \ / ___\
+// \ \___( <_> ) |_( <_> ) | \/ | |__( <_> ) /_/ >
+// \______ /\____/|____/\____/|__| |_______ \____/\___ /
+// \/ \/ /_____/
+
+// Color number constants.
+const (
+    Gray = uint8(iota + 90)
+    Red
+    Green
+    Yellow
+    Blue
+    Magenta
+    //NRed = uint8(31) // Normal
+    EndColor = "\033[0m"
+)
+
+// getColorLevel returns colored level string by given level.
+func getColorLevel(level string) string {
+    level = strings.ToUpper(level)
+    switch level {
+    case "TRAC":
+        return fmt.Sprintf("\033[%dm%s\033[0m", Blue, level)
+    case "ERRO":
+        return fmt.Sprintf("\033[%dm%s\033[0m", Red, level)
+    case "WARN":
+        return fmt.Sprintf("\033[%dm%s\033[0m", Magenta, level)
+    case "SUCC":
+        return fmt.Sprintf("\033[%dm%s\033[0m", Green, level)
+    default:
+        return level
+    }
+}
+
+// ColorLogS colors a log message and returns the colored content.
+// Log format: <level> <content [highlight][path]> [ error ].
+// Level: TRAC -> blue; ERRO -> red; WARN -> Magenta; SUCC -> green; others -> default.
+// Content: default; path: yellow; error -> red.
+// Level has to be surrounded by "[" and "]".
+// Highlights have to be surrounded by "# " and " #"(space), "#" will be deleted.
+// Paths have to be surrounded by "( " and " )"(space).
+// Errors have to be surrounded by "[ " and " ]"(space).
+// Note: it doesn't support Windows yet; contributions are welcome.
+func ColorLogS(format string, a ...interface{}) string {
+    log := fmt.Sprintf(format, a...)
+
+    var clog string
+
+    if runtime.GOOS != "windows" {
+        // Level.
+        i := strings.Index(log, "]")
+        if log[0] == '[' && i > -1 {
+            clog += "[" + getColorLevel(log[1:i]) + "]"
+        }
+
+        log = log[i+1:]
+
+        // Error.
+        log = strings.Replace(log, "[ ", fmt.Sprintf("[\033[%dm", Red), -1)
+        log = strings.Replace(log, " ]", EndColor+"]", -1)
+
+        // Path.
+        log = strings.Replace(log, "( ", fmt.Sprintf("(\033[%dm", Yellow), -1)
+        log = strings.Replace(log, " )", EndColor+")", -1)
+
+        // Highlights.
+        log = strings.Replace(log, "# ", fmt.Sprintf("\033[%dm", Gray), -1)
+        log = strings.Replace(log, " #", EndColor, -1)
+
+    } else {
+        // Level.
+        i := strings.Index(log, "]")
+        if log[0] == '[' && i > -1 {
+            clog += "[" + log[1:i] + "]"
+        }
+
+        log = log[i+1:]
+
+        // Error.
+        log = strings.Replace(log, "[ ", "[", -1)
+        log = strings.Replace(log, " ]", "]", -1)
+
+        // Path.
+        log = strings.Replace(log, "( ", "(", -1)
+        log = strings.Replace(log, " )", ")", -1)
+
+        // Highlights.
+        log = strings.Replace(log, "# ", "", -1)
+        log = strings.Replace(log, " #", "", -1)
+    }
+    return clog + log
+}
+
+// ColorLog prints colored log to stdout.
+// See color rules in function 'ColorLogS'.
+func ColorLog(format string, a ...interface{}) {
+    fmt.Print(ColorLogS(format, a...))
+}
diff --git a/vendor/github.com/Unknwon/com/convert.go b/vendor/github.com/Unknwon/com/convert.go
new file mode 100644
index 00000000000..bf24aa8bc30
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/convert.go
@@ -0,0 +1,167 @@
+// Copyright 2014 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+    "fmt"
+    "strconv"
+)
+
+// StrTo converts a string to the specified type.
+type StrTo string + +func (f StrTo) Exist() bool { + return string(f) != string(0x1E) +} + +func (f StrTo) Uint8() (uint8, error) { + v, err := strconv.ParseUint(f.String(), 10, 8) + return uint8(v), err +} + +func (f StrTo) Int() (int, error) { + v, err := strconv.ParseInt(f.String(), 10, 0) + return int(v), err +} + +func (f StrTo) Int64() (int64, error) { + v, err := strconv.ParseInt(f.String(), 10, 64) + return int64(v), err +} + +func (f StrTo) Float64() (float64, error) { + v, err := strconv.ParseFloat(f.String(), 64) + return float64(v), err +} + +func (f StrTo) MustUint8() uint8 { + v, _ := f.Uint8() + return v +} + +func (f StrTo) MustInt() int { + v, _ := f.Int() + return v +} + +func (f StrTo) MustInt64() int64 { + v, _ := f.Int64() + return v +} + +func (f StrTo) MustFloat64() float64 { + v, _ := f.Float64() + return v +} + +func (f StrTo) String() string { + if f.Exist() { + return string(f) + } + return "" +} + +// Convert any type to string. +func ToStr(value interface{}, args ...int) (s string) { + switch v := value.(type) { + case bool: + s = strconv.FormatBool(v) + case float32: + s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32)) + case float64: + s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64)) + case int: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int8: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int16: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int32: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int64: + s = strconv.FormatInt(v, argInt(args).Get(0, 10)) + case uint: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint8: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint16: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint32: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint64: + s = strconv.FormatUint(v, argInt(args).Get(0, 10)) + case string: + s = v + case []byte: + s = string(v) + default: + s = fmt.Sprintf("%v", v) + } + return s +} + +type argInt []int + +func (a argInt) Get(i int, args ...int) (r int) { + if i >= 0 && i < len(a) { + r = a[i] + } else if len(args) > 0 { + r = args[0] + } + return +} + +// HexStr2int converts hex format string to decimal number. +func HexStr2int(hexStr string) (int, error) { + num := 0 + length := len(hexStr) + for i := 0; i < length; i++ { + char := hexStr[length-i-1] + factor := -1 + + switch { + case char >= '0' && char <= '9': + factor = int(char) - '0' + case char >= 'a' && char <= 'f': + factor = int(char) - 'a' + 10 + default: + return -1, fmt.Errorf("invalid hex: %s", string(char)) + } + + num += factor * PowInt(16, i) + } + return num, nil +} + +// Int2HexStr converts decimal number to hex format string. +func Int2HexStr(num int) (hex string) { + if num == 0 { + return "0" + } + + for num > 0 { + r := num % 16 + + c := "?" + if r >= 0 && r <= 9 { + c = string(r + '0') + } else { + c = string(r + 'a' - 10) + } + hex = c + hex + num = num / 16 + } + return hex +} diff --git a/vendor/github.com/Unknwon/com/dir.go b/vendor/github.com/Unknwon/com/dir.go new file mode 100644 index 00000000000..c126d79da86 --- /dev/null +++ b/vendor/github.com/Unknwon/com/dir.go @@ -0,0 +1,173 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+    "errors"
+    "fmt"
+    "os"
+    "path"
+    "strings"
+)
+
+// IsDir returns true if given path is a directory,
+// or returns false when it's a file or does not exist.
+func IsDir(dir string) bool {
+    f, e := os.Stat(dir)
+    if e != nil {
+        return false
+    }
+    return f.IsDir()
+}
+
+func statDir(dirPath, recPath string, includeDir, isDirOnly bool) ([]string, error) {
+    dir, err := os.Open(dirPath)
+    if err != nil {
+        return nil, err
+    }
+    defer dir.Close()
+
+    fis, err := dir.Readdir(0)
+    if err != nil {
+        return nil, err
+    }
+
+    statList := make([]string, 0)
+    for _, fi := range fis {
+        if strings.Contains(fi.Name(), ".DS_Store") {
+            continue
+        }
+
+        relPath := path.Join(recPath, fi.Name())
+        curPath := path.Join(dirPath, fi.Name())
+        if fi.IsDir() {
+            if includeDir {
+                statList = append(statList, relPath+"/")
+            }
+            s, err := statDir(curPath, relPath, includeDir, isDirOnly)
+            if err != nil {
+                return nil, err
+            }
+            statList = append(statList, s...)
+        } else if !isDirOnly {
+            statList = append(statList, relPath)
+        }
+    }
+    return statList, nil
+}
+
+// StatDir gathers information of the given directory depth-first.
+// It returns a slice of file paths, including subdirectories if enabled;
+// it returns an error and a nil slice when an error occurs in underlying functions,
+// or when the given path is not a directory or does not exist.
+//
+// The slice does not include the given path itself.
+// If subdirectories are enabled, they carry the suffix '/'.
+func StatDir(rootPath string, includeDir ...bool) ([]string, error) {
+    if !IsDir(rootPath) {
+        return nil, errors.New("not a directory or does not exist: " + rootPath)
+    }
+
+    isIncludeDir := false
+    if len(includeDir) >= 1 {
+        isIncludeDir = includeDir[0]
+    }
+    return statDir(rootPath, "", isIncludeDir, false)
+}
+
+// GetAllSubDirs returns all subdirectories of the given root path.
+// The slice does not include the given path itself.
+func GetAllSubDirs(rootPath string) ([]string, error) {
+    if !IsDir(rootPath) {
+        return nil, errors.New("not a directory or does not exist: " + rootPath)
+    }
+    return statDir(rootPath, "", true, true)
+}
+
+// GetFileListBySuffix returns an ordered list of file paths.
+// It recognizes when the given path is a file, and does not search recursively.
+func GetFileListBySuffix(dirPath, suffix string) ([]string, error) {
+    if !IsExist(dirPath) {
+        return nil, fmt.Errorf("given path does not exist: %s", dirPath)
+    } else if IsFile(dirPath) {
+        return []string{dirPath}, nil
+    }
+
+    // Given path is a directory.
+    dir, err := os.Open(dirPath)
+    if err != nil {
+        return nil, err
+    }
+
+    fis, err := dir.Readdir(0)
+    if err != nil {
+        return nil, err
+    }
+
+    files := make([]string, 0, len(fis))
+    for _, fi := range fis {
+        if strings.HasSuffix(fi.Name(), suffix) {
+            files = append(files, path.Join(dirPath, fi.Name()))
+        }
+    }
+
+    return files, nil
+}
+
+// CopyDir copies files recursively from the source to the target directory.
+//
+// The filter accepts a function that processes the relative path of each entry
+// and should return true for entries that need to be filtered out.
+//
+// It returns an error when an error occurs in underlying functions;
+// a usage sketch follows.
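As referenced above, a short usage sketch for CopyDir's filter hook; the directory names and the tilde suffix are illustrative:

```go
package main

import (
    "log"
    "strings"

    "github.com/Unknwon/com"
)

func main() {
    // Copy ./site into ./site-backup, skipping editor backup files;
    // returning true from the filter excludes that relative path.
    err := com.CopyDir("site", "site-backup", func(p string) bool {
        return strings.HasSuffix(p, "~")
    })
    if err != nil {
        log.Fatal(err)
    }
}
```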
+func CopyDir(srcPath, destPath string, filters ...func(filePath string) bool) error {
+    // Check if target directory exists.
+    if IsExist(destPath) {
+        return errors.New("file or directory already exists: " + destPath)
+    }
+
+    err := os.MkdirAll(destPath, os.ModePerm)
+    if err != nil {
+        return err
+    }
+
+    // Gather directory info.
+    infos, err := StatDir(srcPath, true)
+    if err != nil {
+        return err
+    }
+
+    var filter func(filePath string) bool
+    if len(filters) > 0 {
+        filter = filters[0]
+    }
+
+    for _, info := range infos {
+        if filter != nil && filter(info) {
+            continue
+        }
+
+        curPath := path.Join(destPath, info)
+        if strings.HasSuffix(info, "/") {
+            err = os.MkdirAll(curPath, os.ModePerm)
+        } else {
+            err = Copy(path.Join(srcPath, info), curPath)
+        }
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
diff --git a/vendor/github.com/Unknwon/com/file.go b/vendor/github.com/Unknwon/com/file.go
new file mode 100644
index 00000000000..b51502c9176
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/file.go
@@ -0,0 +1,145 @@
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+    "fmt"
+    "io"
+    "io/ioutil"
+    "math"
+    "os"
+    "path"
+)
+
+// Storage unit constants.
+const (
+    Byte  = 1
+    KByte = Byte * 1024
+    MByte = KByte * 1024
+    GByte = MByte * 1024
+    TByte = GByte * 1024
+    PByte = TByte * 1024
+    EByte = PByte * 1024
+)
+
+func logn(n, b float64) float64 {
+    return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+    if s < 10 {
+        return fmt.Sprintf("%dB", s)
+    }
+    e := math.Floor(logn(float64(s), base))
+    suffix := sizes[int(e)]
+    val := float64(s) / math.Pow(base, math.Floor(e))
+    f := "%.0f"
+    if val < 10 {
+        f = "%.1f"
+    }
+
+    return fmt.Sprintf(f+"%s", val, suffix)
+}
+
+// HumaneFileSize calculates the file size and generates a user-friendly string.
+func HumaneFileSize(s uint64) string {
+    sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
+    return humanateBytes(s, 1024, sizes)
+}
+
+// FileMTime returns the file's modified time and a possible error.
+func FileMTime(file string) (int64, error) {
+    f, err := os.Stat(file)
+    if err != nil {
+        return 0, err
+    }
+    return f.ModTime().Unix(), nil
+}
+
+// FileSize returns the file size in bytes and a possible error.
+func FileSize(file string) (int64, error) {
+    f, err := os.Stat(file)
+    if err != nil {
+        return 0, err
+    }
+    return f.Size(), nil
+}
+
+// Copy copies a file from the source to the target path.
+func Copy(src, dest string) error {
+    // Gather file information to set back later.
+    si, err := os.Lstat(src)
+    if err != nil {
+        return err
+    }
+
+    // Handle symbolic link.
+    if si.Mode()&os.ModeSymlink != 0 {
+        target, err := os.Readlink(src)
+        if err != nil {
+            return err
+        }
+        // NOTE: os.Chmod and os.Chtimes don't recognize symbolic links,
+        // which would lead to a "no such file or directory" error.
+        return os.Symlink(target, dest)
+    }
+
+    sr, err := os.Open(src)
+    if err != nil {
+        return err
+    }
+    defer sr.Close()
+
+    dw, err := os.Create(dest)
+    if err != nil {
+        return err
+    }
+    defer dw.Close()
+
+    if _, err = io.Copy(dw, sr); err != nil {
+        return err
+    }
+
+    // Set back file information.
+    if err = os.Chtimes(dest, si.ModTime(), si.ModTime()); err != nil {
+        return err
+    }
+    return os.Chmod(dest, si.Mode())
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it
+// and its upper level paths.
+func WriteFile(filename string, data []byte) error {
+    os.MkdirAll(path.Dir(filename), os.ModePerm)
+    return ioutil.WriteFile(filename, data, 0655)
+}
+
+// IsFile returns true if given path is a file,
+// or returns false when it's a directory or does not exist.
+func IsFile(filePath string) bool {
+    f, e := os.Stat(filePath)
+    if e != nil {
+        return false
+    }
+    return !f.IsDir()
+}
+
+// IsExist checks whether a file or directory exists.
+// It returns false when the file or directory does not exist.
+func IsExist(path string) bool {
+    _, err := os.Stat(path)
+    return err == nil || os.IsExist(err)
+}
diff --git a/vendor/github.com/Unknwon/com/html.go b/vendor/github.com/Unknwon/com/html.go
new file mode 100644
index 00000000000..762d94b050d
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/html.go
@@ -0,0 +1,60 @@
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+    "html"
+    "regexp"
+    "strings"
+)
+
+// Html2JS converts []byte type of HTML content into JS format.
+func Html2JS(data []byte) []byte {
+    s := string(data)
+    s = strings.Replace(s, `\`, `\\`, -1)
+    s = strings.Replace(s, "\n", `\n`, -1)
+    s = strings.Replace(s, "\r", "", -1)
+    s = strings.Replace(s, "\"", `\"`, -1)
+    s = strings.Replace(s, "<table>", "&lt;table>", -1)
+    return []byte(s)
+}
+
+// HtmlEncode encodes html chars to string.
+func HtmlEncode(str string) string {
+    return html.EscapeString(str)
+}
+
+// HtmlDecode decodes string to html chars.
+func HtmlDecode(str string) string {
+    return html.UnescapeString(str)
+}
+
+// StripTags strips tags in an html string.
+func StripTags(src string) string {
+    // Remove style blocks, script blocks, html tags, and comments.
+    re := regexp.MustCompile(`(?s)<(?:style|script)[^<>]*>.*?</(?:style|script)>|</?[a-z][a-z0-9]*[^<>]*>|<!--.*?-->`)
+    src = re.ReplaceAllString(src, "")
+
+    // Trim all runs of spaces (2+) into \n.
+    re = regexp.MustCompile(`\s{2,}`)
+    src = re.ReplaceAllString(src, "\n")
+
+    return strings.TrimSpace(src)
+}
+
+// Nl2br changes \n to <br/>.
+func Nl2br(str string) string {
+    return strings.Replace(str, "\n", "<br/>
", -1) +} diff --git a/vendor/github.com/Unknwon/com/http.go b/vendor/github.com/Unknwon/com/http.go new file mode 100644 index 00000000000..3415059ae90 --- /dev/null +++ b/vendor/github.com/Unknwon/com/http.go @@ -0,0 +1,201 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" +) + +type NotFoundError struct { + Message string +} + +func (e NotFoundError) Error() string { + return e.Message +} + +type RemoteError struct { + Host string + Err error +} + +func (e *RemoteError) Error() string { + return e.Err.Error() +} + +var UserAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1541.0 Safari/537.36" + +// HttpCall makes HTTP method call. +func HttpCall(client *http.Client, method, url string, header http.Header, body io.Reader) (io.ReadCloser, error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", UserAgent) + for k, vs := range header { + req.Header[k] = vs + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode == 200 { + return resp.Body, nil + } + resp.Body.Close() + if resp.StatusCode == 404 { // 403 can be rate limit error. || resp.StatusCode == 403 { + err = fmt.Errorf("resource not found: %s", url) + } else { + err = fmt.Errorf("%s %s -> %d", method, url, resp.StatusCode) + } + return nil, err +} + +// HttpGet gets the specified resource. +// ErrNotFound is returned if the server responds with status 404. +func HttpGet(client *http.Client, url string, header http.Header) (io.ReadCloser, error) { + return HttpCall(client, "GET", url, header, nil) +} + +// HttpPost posts the specified resource. +// ErrNotFound is returned if the server responds with status 404. +func HttpPost(client *http.Client, url string, header http.Header, body []byte) (io.ReadCloser, error) { + return HttpCall(client, "POST", url, header, bytes.NewBuffer(body)) +} + +// HttpGetToFile gets the specified resource and writes to file. +// ErrNotFound is returned if the server responds with status 404. +func HttpGetToFile(client *http.Client, url string, header http.Header, fileName string) error { + rc, err := HttpGet(client, url, header) + if err != nil { + return err + } + defer rc.Close() + + os.MkdirAll(path.Dir(fileName), os.ModePerm) + f, err := os.Create(fileName) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(f, rc) + return err +} + +// HttpGetBytes gets the specified resource. ErrNotFound is returned if the server +// responds with status 404. +func HttpGetBytes(client *http.Client, url string, header http.Header) ([]byte, error) { + rc, err := HttpGet(client, url, header) + if err != nil { + return nil, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} + +// HttpGetJSON gets the specified resource and mapping to struct. 
+// ErrNotFound is returned if the server responds with status 404. +func HttpGetJSON(client *http.Client, url string, v interface{}) error { + rc, err := HttpGet(client, url, nil) + if err != nil { + return err + } + defer rc.Close() + err = json.NewDecoder(rc).Decode(v) + if _, ok := err.(*json.SyntaxError); ok { + return fmt.Errorf("JSON syntax error at %s", url) + } + return nil +} + +// HttpPostJSON posts the specified resource with struct values, +// and maps results to struct. +// ErrNotFound is returned if the server responds with status 404. +func HttpPostJSON(client *http.Client, url string, body, v interface{}) error { + data, err := json.Marshal(body) + if err != nil { + return err + } + rc, err := HttpPost(client, url, http.Header{"content-type": []string{"application/json"}}, data) + if err != nil { + return err + } + defer rc.Close() + err = json.NewDecoder(rc).Decode(v) + if _, ok := err.(*json.SyntaxError); ok { + return fmt.Errorf("JSON syntax error at %s", url) + } + return nil +} + +// A RawFile describes a file that can be downloaded. +type RawFile interface { + Name() string + RawUrl() string + Data() []byte + SetData([]byte) +} + +// FetchFiles fetches files specified by the rawURL field in parallel. +func FetchFiles(client *http.Client, files []RawFile, header http.Header) error { + ch := make(chan error, len(files)) + for i := range files { + go func(i int) { + p, err := HttpGetBytes(client, files[i].RawUrl(), nil) + if err != nil { + ch <- err + return + } + files[i].SetData(p) + ch <- nil + }(i) + } + for _ = range files { + if err := <-ch; err != nil { + return err + } + } + return nil +} + +// FetchFiles uses command `curl` to fetch files specified by the rawURL field in parallel. +func FetchFilesCurl(files []RawFile, curlOptions ...string) error { + ch := make(chan error, len(files)) + for i := range files { + go func(i int) { + stdout, _, err := ExecCmd("curl", append(curlOptions, files[i].RawUrl())...) + if err != nil { + ch <- err + return + } + + files[i].SetData([]byte(stdout)) + ch <- nil + }(i) + } + for _ = range files { + if err := <-ch; err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Unknwon/com/math.go b/vendor/github.com/Unknwon/com/math.go new file mode 100644 index 00000000000..99c56b65947 --- /dev/null +++ b/vendor/github.com/Unknwon/com/math.go @@ -0,0 +1,29 @@ +// Copyright 2014 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +// PowInt is int type of math.Pow function. 
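The RawFile interface and FetchFiles above download a batch of files in parallel, one goroutine per file. A minimal in-memory implementation might look like the following sketch; the `rawFile` type and the URLs are illustrative, not part of the vendored package:

```go
package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/Unknwon/com"
)

// rawFile is a minimal RawFile implementation holding its payload in memory.
type rawFile struct {
    name, url string
    data      []byte
}

func (f *rawFile) Name() string     { return f.name }
func (f *rawFile) RawUrl() string   { return f.url }
func (f *rawFile) Data() []byte     { return f.data }
func (f *rawFile) SetData(p []byte) { f.data = p }

func main() {
    files := []com.RawFile{
        &rawFile{name: "a", url: "https://example.com/a.txt"},
        &rawFile{name: "b", url: "https://example.com/b.txt"},
    }
    // Each file is fetched concurrently; the first error aborts the wait.
    if err := com.FetchFiles(http.DefaultClient, files, nil); err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(files[0].Data()), "bytes fetched for a")
}
```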
+func PowInt(x int, y int) int { + if y <= 0 { + return 1 + } else { + if y % 2 == 0 { + sqrt := PowInt(x, y/2) + return sqrt * sqrt + } else { + return PowInt(x, y-1) * x + } + } +} diff --git a/vendor/github.com/Unknwon/com/path.go b/vendor/github.com/Unknwon/com/path.go new file mode 100644 index 00000000000..b1e860def42 --- /dev/null +++ b/vendor/github.com/Unknwon/com/path.go @@ -0,0 +1,80 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "strings" +) + +// GetGOPATHs returns all paths in GOPATH variable. +func GetGOPATHs() []string { + gopath := os.Getenv("GOPATH") + var paths []string + if runtime.GOOS == "windows" { + gopath = strings.Replace(gopath, "\\", "/", -1) + paths = strings.Split(gopath, ";") + } else { + paths = strings.Split(gopath, ":") + } + return paths +} + +// GetSrcPath returns app. source code path. +// It only works when you have src. folder in GOPATH, +// it returns error not able to locate source folder path. +func GetSrcPath(importPath string) (appPath string, err error) { + paths := GetGOPATHs() + for _, p := range paths { + if IsExist(p + "/src/" + importPath + "/") { + appPath = p + "/src/" + importPath + "/" + break + } + } + + if len(appPath) == 0 { + return "", errors.New("Unable to locate source folder path") + } + + appPath = filepath.Dir(appPath) + "/" + if runtime.GOOS == "windows" { + // Replace all '\' to '/'. + appPath = strings.Replace(appPath, "\\", "/", -1) + } + + return appPath, nil +} + +// HomeDir returns path of '~'(in Linux) on Windows, +// it returns error when the variable does not exist. +func HomeDir() (home string, err error) { + if runtime.GOOS == "windows" { + home = os.Getenv("USERPROFILE") + if len(home) == 0 { + home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + } + } else { + home = os.Getenv("HOME") + } + + if len(home) == 0 { + return "", errors.New("Cannot specify home directory because it's empty") + } + + return home, nil +} diff --git a/vendor/github.com/Unknwon/com/regex.go b/vendor/github.com/Unknwon/com/regex.go new file mode 100644 index 00000000000..765bfc43113 --- /dev/null +++ b/vendor/github.com/Unknwon/com/regex.go @@ -0,0 +1,56 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
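PowInt above implements exponentiation by squaring: an even exponent recurses on y/2 and squares the result, so the recursion depth grows with log(y) rather than y. A quick usage sketch (values illustrative); note that HexStr2int in convert.go relies on exactly these powers of 16:

```go
package main

import (
    "fmt"

    "github.com/Unknwon/com"
)

func main() {
    fmt.Println(com.PowInt(16, 3)) // 4096, the weight of the fourth hex digit
    fmt.Println(com.PowInt(2, 10)) // 1024
    fmt.Println(com.PowInt(5, 0))  // 1 (y <= 0 returns 1)
}
```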
+ +package com + +import "regexp" + +const ( + regex_email_pattern = `(?i)[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}` + regex_strict_email_pattern = `(?i)[A-Z0-9!#$%&'*+/=?^_{|}~-]+` + + `(?:\.[A-Z0-9!#$%&'*+/=?^_{|}~-]+)*` + + `@(?:[A-Z0-9](?:[A-Z0-9-]*[A-Z0-9])?\.)+` + + `[A-Z0-9](?:[A-Z0-9-]*[A-Z0-9])?` + regex_url_pattern = `(ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?` +) + +var ( + regex_email *regexp.Regexp + regex_strict_email *regexp.Regexp + regex_url *regexp.Regexp +) + +func init() { + regex_email = regexp.MustCompile(regex_email_pattern) + regex_strict_email = regexp.MustCompile(regex_strict_email_pattern) + regex_url = regexp.MustCompile(regex_url_pattern) +} + +// validate string is an email address, if not return false +// basically validation can match 99% cases +func IsEmail(email string) bool { + return regex_email.MatchString(email) +} + +// validate string is an email address, if not return false +// this validation omits RFC 2822 +func IsEmailRFC(email string) bool { + return regex_strict_email.MatchString(email) +} + +// validate string is a url link, if not return false +// simple validation can match 99% cases +func IsUrl(url string) bool { + return regex_url.MatchString(url) +} diff --git a/vendor/github.com/Unknwon/com/slice.go b/vendor/github.com/Unknwon/com/slice.go new file mode 100644 index 00000000000..27801a4d7df --- /dev/null +++ b/vendor/github.com/Unknwon/com/slice.go @@ -0,0 +1,87 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "strings" +) + +// AppendStr appends string to slice with no duplicates. +func AppendStr(strs []string, str string) []string { + for _, s := range strs { + if s == str { + return strs + } + } + return append(strs, str) +} + +// CompareSliceStr compares two 'string' type slices. +// It returns true if elements and order are both the same. +func CompareSliceStr(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + + return true +} + +// CompareSliceStr compares two 'string' type slices. +// It returns true if elements are the same, and ignores the order. +func CompareSliceStrU(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + + for i := range s1 { + for j := len(s2) - 1; j >= 0; j-- { + if s1[i] == s2[j] { + s2 = append(s2[:j], s2[j+1:]...) + break + } + } + } + if len(s2) > 0 { + return false + } + return true +} + +// IsSliceContainsStr returns true if the string exists in given slice, ignore case. +func IsSliceContainsStr(sl []string, str string) bool { + str = strings.ToLower(str) + for _, s := range sl { + if strings.ToLower(s) == str { + return true + } + } + return false +} + +// IsSliceContainsInt64 returns true if the int64 exists in given slice. 
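A brief sketch of the regex validators above; the sample inputs are illustrative:

```go
package main

import (
    "fmt"

    "github.com/Unknwon/com"
)

func main() {
    fmt.Println(com.IsEmail("user@example.com"))  // true
    fmt.Println(com.IsEmail("not-an-email"))      // false
    fmt.Println(com.IsUrl("https://example.com")) // true
}
```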
+func IsSliceContainsInt64(sl []int64, i int64) bool { + for _, s := range sl { + if s == i { + return true + } + } + return false +} diff --git a/vendor/github.com/Unknwon/com/string.go b/vendor/github.com/Unknwon/com/string.go new file mode 100644 index 00000000000..7080d174a81 --- /dev/null +++ b/vendor/github.com/Unknwon/com/string.go @@ -0,0 +1,253 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + r "math/rand" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +// AESGCMEncrypt encrypts plaintext with the given key using AES in GCM mode. +func AESGCMEncrypt(key, plaintext []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err := rand.Read(nonce); err != nil { + return nil, err + } + + ciphertext := gcm.Seal(nil, nonce, plaintext, nil) + return append(nonce, ciphertext...), nil +} + +// AESGCMDecrypt decrypts ciphertext with the given key using AES in GCM mode. +func AESGCMDecrypt(key, ciphertext []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + size := gcm.NonceSize() + if len(ciphertext)-size <= 0 { + return nil, errors.New("Ciphertext is empty") + } + + nonce := ciphertext[:size] + ciphertext = ciphertext[size:] + + plainText, err := gcm.Open(nil, nonce, ciphertext, nil) + if err != nil { + return nil, err + } + + return plainText, nil +} + +// IsLetter returns true if the 'l' is an English letter. +func IsLetter(l uint8) bool { + n := (l | 0x20) - 'a' + if n >= 0 && n < 26 { + return true + } + return false +} + +// Expand replaces {k} in template with match[k] or subs[atoi(k)] if k is not in match. +func Expand(template string, match map[string]string, subs ...string) string { + var p []byte + var i int + for { + i = strings.Index(template, "{") + if i < 0 { + break + } + p = append(p, template[:i]...) + template = template[i+1:] + i = strings.Index(template, "}") + if s, ok := match[template[:i]]; ok { + p = append(p, s...) + } else { + j, _ := strconv.Atoi(template[:i]) + if j >= len(subs) { + p = append(p, []byte("Missing")...) + } else { + p = append(p, subs[j]...) + } + } + template = template[i+1:] + } + p = append(p, template...) + return string(p) +} + +// Reverse s string, support unicode +func Reverse(s string) string { + n := len(s) + runes := make([]rune, n) + for _, rune := range s { + n-- + runes[n] = rune + } + return string(runes[n:]) +} + +// RandomCreateBytes generate random []byte by specify chars. 
+func RandomCreateBytes(n int, alphabets ...byte) []byte { + const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + var randby bool + if num, err := rand.Read(bytes); num != n || err != nil { + r.Seed(time.Now().UnixNano()) + randby = true + } + for i, b := range bytes { + if len(alphabets) == 0 { + if randby { + bytes[i] = alphanum[r.Intn(len(alphanum))] + } else { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + } else { + if randby { + bytes[i] = alphabets[r.Intn(len(alphabets))] + } else { + bytes[i] = alphabets[b%byte(len(alphabets))] + } + } + } + return bytes +} + +// ToSnakeCase can convert all upper case characters in a string to +// underscore format. +// +// Some samples. +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// +// From https://github.com/huandu/xstrings +func ToSnakeCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var prev, r0, r1 rune + var size int + + r0 = '_' + + for len(str) > 0 { + prev = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + switch { + case r0 == utf8.RuneError: + buf.WriteByte(byte(str[0])) + + case unicode.IsUpper(r0): + if prev != '_' { + buf.WriteRune('_') + } + + buf.WriteRune(unicode.ToLower(r0)) + + if len(str) == 0 { + break + } + + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !unicode.IsUpper(r0) { + buf.WriteRune(r0) + break + } + + // find next non-upper-case character and insert `_` properly. + // it's designed to convert `HTTPServer` to `http_server`. + // if there are more than 2 adjacent upper case characters in a word, + // treat them as an abbreviation plus a normal word. + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 == utf8.RuneError { + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteByte(byte(str[0])) + break + } + + if !unicode.IsUpper(r0) { + if r0 == '_' || r0 == ' ' || r0 == '-' { + r0 = '_' + + buf.WriteRune(unicode.ToLower(r1)) + } else { + buf.WriteRune('_') + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(r0) + } + + break + } + + buf.WriteRune(unicode.ToLower(r1)) + } + + if len(str) == 0 || r0 == '_' { + buf.WriteRune(unicode.ToLower(r0)) + break + } + + default: + if r0 == ' ' || r0 == '-' { + r0 = '_' + } + + buf.WriteRune(r0) + } + } + + return buf.String() +} diff --git a/vendor/github.com/Unknwon/com/time.go b/vendor/github.com/Unknwon/com/time.go new file mode 100644 index 00000000000..dd1cdbcf2a0 --- /dev/null +++ b/vendor/github.com/Unknwon/com/time.go @@ -0,0 +1,115 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package com + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Date formats a Unix timestamp (int64) as a string using the given format. +func Date(ti int64, format string) string { + t := time.Unix(int64(ti), 0) + return DateT(t, format) +} + +// DateS formats a Unix timestamp held in a string; parse errors fall back to 0. +func DateS(ts string, format string) string { + i, _ := strconv.ParseInt(ts, 10, 64) + return Date(i, format) +} + +// DateT formats a time.Time value as a string using these tokens: +// MM - month - 01 +// M - month - 1, single digit +// DD - day - 02 +// D - day - 2 +// YYYY - year - 2006 +// YY - year - 06 +// HH - 24 hours, zero-padded - 09 +// H - 24 hours - 9 +// hh - 12 hours - 03 +// h - 12 hours - 3 +// mm - minute - 04 +// m - minute - 4 +// ss - second - 05 +// s - second - 5 +func DateT(t time.Time, format string) string { + res := strings.Replace(format, "MM", t.Format("01"), -1) + res = strings.Replace(res, "M", t.Format("1"), -1) + res = strings.Replace(res, "DD", t.Format("02"), -1) + res = strings.Replace(res, "D", t.Format("2"), -1) + res = strings.Replace(res, "YYYY", t.Format("2006"), -1) + res = strings.Replace(res, "YY", t.Format("06"), -1) + res = strings.Replace(res, "HH", fmt.Sprintf("%02d", t.Hour()), -1) + res = strings.Replace(res, "H", fmt.Sprintf("%d", t.Hour()), -1) + res = strings.Replace(res, "hh", t.Format("03"), -1) + res = strings.Replace(res, "h", t.Format("3"), -1) + res = strings.Replace(res, "mm", t.Format("04"), -1) + res = strings.Replace(res, "m", t.Format("4"), -1) + res = strings.Replace(res, "ss", t.Format("05"), -1) + res = strings.Replace(res, "s", t.Format("5"), -1) + return res +} + +// DateFormat pattern rules. +var datePatterns = []string{ + // year + "Y", "2006", // A full numeric representation of a year, 4 digits. Examples: 1999 or 2003 + "y", "06", // A two-digit representation of a year. Examples: 99 or 03 + + // month + "m", "01", // Numeric representation of a month, with leading zeros. 01 through 12 + "n", "1", // Numeric representation of a month, without leading zeros. 1 through 12 + "M", "Jan", // A short textual representation of a month, three letters. Jan through Dec + "F", "January", // A full textual representation of a month, such as January or March. January through December + + // day + "d", "02", // Day of the month, 2 digits with leading zeros. 01 to 31 + "j", "2", // Day of the month without leading zeros. 1 to 31 + + // week + "D", "Mon", // A textual representation of a day, three letters. Mon through Sun + "l", "Monday", // A full textual representation of the day of the week. Sunday through Saturday + + // time + "g", "3", // 12-hour format of an hour without leading zeros. 1 through 12 + "G", "15", // 24-hour format of an hour without leading zeros. 0 through 23 + "h", "03", // 12-hour format of an hour with leading zeros. 01 through 12 + "H", "15", // 24-hour format of an hour with leading zeros. 00 through 23 + + "a", "pm", // Lowercase ante meridiem and post meridiem. am or pm + "A", "PM", // Uppercase ante meridiem and post meridiem. AM or PM + + "i", "04", // Minutes with leading zeros. 00 to 59 + "s", "05", // Seconds, with leading zeros. 00 through 59 + + // time zone + "T", "MST", + "P", "-07:00", + "O", "-0700", + + // RFC 2822 + "r", time.RFC1123Z, +} + +// DateParse parses a date string using PHP-style format tokens. +func DateParse(dateString, format string) (time.Time, error) { + replacer := strings.NewReplacer(datePatterns...)
+ format = replacer.Replace(format) + return time.ParseInLocation(format, dateString, time.Local) +} diff --git a/vendor/github.com/Unknwon/com/url.go b/vendor/github.com/Unknwon/com/url.go new file mode 100644 index 00000000000..b0b7c0e12b2 --- /dev/null +++ b/vendor/github.com/Unknwon/com/url.go @@ -0,0 +1,41 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "encoding/base64" + "net/url" +) + +// UrlEncode query-escapes the string (spaces become '+', not %20). +func UrlEncode(str string) string { + return url.QueryEscape(str) +} + +// UrlDecode reverses query escaping. +func UrlDecode(str string) (string, error) { + return url.QueryUnescape(str) +} + +// Base64Encode returns the standard base64 encoding of the string. +func Base64Encode(str string) string { + return base64.StdEncoding.EncodeToString([]byte(str)) +} + +// Base64Decode decodes a standard base64-encoded string. +func Base64Decode(str string) (string, error) { + s, e := base64.StdEncoding.DecodeString(str) + return string(s), e +} diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO new file mode 100644 index 00000000000..716561d5d28 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
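Before the levenshtein package files continue, a brief editorial aside on the `Unknwon/com` helpers vendored earlier in this diff. The sketch below is not part of the vendored tree; it is a minimal illustration that assumes only the functions defined in the files above (time.go, url.go, regex.go, slice.go) and the vendor import path `github.com/Unknwon/com`. The literal timestamp and strings are example values.

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	// time.go: Date renders a Unix timestamp with token-based layouts;
	// e.g. "YYYY-MM-DD" maps onto Go's reference time "2006-01-02".
	fmt.Println(com.Date(1509408000, "YYYY-MM-DD HH:mm:ss"))

	// time.go: DateParse goes the other way, using PHP-style tokens
	// ("Y-m-d" becomes the Go layout "2006-01-02").
	t, err := com.DateParse("2017-10-31", "Y-m-d")
	fmt.Println(t, err)

	// url.go: query escaping uses '+' for spaces; base64 helpers wrap
	// the standard encoding.
	fmt.Println(com.UrlEncode("a b&c"))     // "a+b%26c"
	fmt.Println(com.Base64Encode("gopher")) // "Z29waGVy"

	// regex.go / slice.go: simple validators and slice utilities.
	fmt.Println(com.IsEmail("dev@example.com"))                 // true
	fmt.Println(com.IsSliceContainsStr([]string{"Foo"}, "foo")) // true, ignoring case
}
```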
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS new file mode 100644 index 00000000000..726c2afb321 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/MAINTAINERS @@ -0,0 +1 @@ +Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 00000000000..eaffaab94c4 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md new file mode 100644 index 00000000000..90509c2a20d --- /dev/null +++ b/vendor/github.com/agext/levenshtein/README.md @@ -0,0 +1,38 @@ +# A Go package for calculating the Levenshtein distance between two strings + +[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)  +[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein) +[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein) +[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein) + + +This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org). + +## Project Status + +v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. + +This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. + +## Overview + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. 
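To make the Overview concrete before the `Match` discussion below, here is a minimal editorial sketch (not part of the upstream README) exercising `Distance` and `Similarity` together with the chainable parameter setters defined in params.go. Note that once a `MaxCost` cap is exceeded, the returned value is only a lower bound, so the sketch checks it rather than printing an exact number.

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// nil params: unit cost for insertion, deletion and substitution,
	// no maximum, so the result is the exact edit distance.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// With a cap, the calculation can stop early; any result above the
	// cap is a lower bound rather than the exact distance.
	capped := levenshtein.NewParams().MaxCost(1)
	fmt.Println(levenshtein.Distance("kitten", "sitting", capped) > 1) // true

	// Similarity normalizes the distance into 0..1; scores under
	// MinScore are rounded down to 0.
	p := levenshtein.NewParams().MinScore(0.5)
	fmt.Println(levenshtein.Similarity("kitten", "sitting", p)) // ~0.571
}
```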
+ +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. + +## Installation + +``` +go get github.com/agext/levenshtein +``` + +## License + +Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 00000000000..df69ce70165 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. 
+ +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. +func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. 
+ if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. + if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. 
+// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. + max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 00000000000..a85727b3eff --- /dev/null +++ b/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. 
+type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. +func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). 
+func (p *Params) BonusThreshold(v float64) *Params { + if v >= 0 { + p.bonusThreshold = v + } + return p +} diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE new file mode 100644 index 00000000000..684b03b4a27 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/LICENSE @@ -0,0 +1,95 @@ +Copyright (c) 2017 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------- + +Unicode table generation programs are under a separate copyright and license: + +Copyright (c) 2014 Couchbase, Inc. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the +License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See the License for the specific language governing permissions +and limitations under the License. + +--------- + +Grapheme break data is provided as part of the Unicode character database, +copright 2016 Unicode, Inc, which is provided with the following license: + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. + +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2017 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder. diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go new file mode 100644 index 00000000000..5752e9ef8f7 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. 
+func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go new file mode 100644 index 00000000000..81f3a747178 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go @@ -0,0 +1,7 @@ +package textseg + +//go:generate go run make_tables.go -output tables.go +//go:generate go run make_test_tables.go -output tables_test.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl +//go:generate ragel -Z grapheme_clusters.rl +//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go new file mode 100644 index 00000000000..012bc690aa1 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -0,0 +1,5276 @@ + +// line 1 "grapheme_clusters.rl" +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT + +// line 13 "grapheme_clusters.go" +var _graphclust_actions []byte = []byte{ + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, +} + +var _graphclust_key_offsets []int16 = []int16{ + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 
1521, 1529, 1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 
4665, 4667, 4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 
7663, 7665, 7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 
10932, 10952, 10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, +} + +var _graphclust_trans_keys []byte = []byte{ + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, + 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 
255, 166, + 176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 
186, 187, 190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 
131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 
170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 
155, 156, 130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 
130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 
173, 183, 185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 129, 128, + 130, 184, 255, 135, 190, 130, 131, 175, + 176, 178, 183, 184, 187, 255, 172, 128, + 130, 167, 180, 179, 130, 128, 129, 179, + 181, 182, 190, 191, 255, 129, 137, 138, + 140, 141, 255, 180, 190, 172, 174, 175, + 177, 178, 181, 182, 183, 159, 160, 162, + 163, 170, 188, 190, 191, 128, 129, 130, + 131, 128, 151, 129, 132, 135, 136, 139, + 141, 162, 163, 166, 172, 176, 180, 181, + 183, 184, 191, 133, 128, 129, 130, 134, + 176, 185, 189, 177, 178, 179, 186, 187, + 190, 191, 255, 129, 132, 255, 175, 190, + 176, 177, 178, 181, 184, 187, 188, 255, + 129, 155, 158, 255, 189, 176, 178, 179, + 186, 187, 190, 191, 255, 129, 255, 172, + 182, 171, 173, 174, 175, 176, 183, 166, + 157, 159, 160, 161, 162, 171, 175, 190, + 176, 182, 184, 191, 169, 177, 180, 146, + 167, 170, 182, 171, 172, 189, 190, 176, + 180, 176, 182, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 166, 173, + 165, 169, 174, 178, 187, 255, 131, 132, + 140, 169, 174, 255, 130, 132, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 163, 165, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 144, 150, 132, 138, 143, 187, 191, 160, + 128, 129, 132, 135, 133, 134, 160, 255, + 192, 255, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 128, 129, 130, + 132, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 160, 255, 128, + 129, 130, 133, 134, 135, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 160, + 255, 168, 255, 128, 129, 130, 134, 135, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 168, 255, 192, 255, 159, 139, + 187, 158, 159, 176, 255, 135, 138, 139, + 187, 188, 255, 168, 255, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 177, 178, 179, 180, + 181, 
182, 184, 185, 186, 187, 188, 189, + 191, 176, 190, 192, 255, 135, 147, 160, + 188, 128, 156, 184, 129, 255, 128, 129, + 130, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 158, 159, 135, + 255, 148, 176, 140, 168, 132, 160, 188, + 152, 180, 144, 172, 136, 164, 192, 255, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 128, 156, 160, 255, 136, + 164, 175, 176, 255, 142, 128, 191, 128, + 129, 152, 155, 156, 130, 191, 139, 141, + 128, 140, 142, 143, 144, 167, 168, 174, + 175, 191, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 167, 169, 171, 173, 174, + 175, 176, 177, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 166, 170, 172, 178, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 189, 132, 185, 144, 152, 161, + 164, 255, 188, 129, 131, 190, 255, 133, + 134, 137, 138, 142, 150, 152, 161, 164, + 255, 131, 134, 137, 138, 142, 144, 146, + 175, 178, 180, 182, 255, 134, 138, 142, + 161, 164, 255, 188, 129, 131, 190, 191, + 128, 132, 135, 136, 139, 141, 150, 151, + 162, 163, 130, 190, 191, 151, 128, 130, + 134, 136, 138, 141, 128, 131, 190, 255, + 133, 137, 142, 148, 151, 161, 164, 255, + 128, 132, 134, 136, 138, 141, 149, 150, + 162, 163, 129, 131, 190, 255, 133, 137, + 142, 150, 152, 161, 164, 255, 130, 131, + 138, 150, 143, 148, 152, 159, 178, 179, + 177, 179, 186, 135, 142, 177, 179, 185, + 187, 188, 136, 141, 181, 183, 185, 152, + 153, 190, 191, 177, 191, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 173, 183, 185, 190, 150, + 153, 158, 160, 177, 180, 130, 141, 157, + 132, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 180, 255, 148, 156, + 158, 255, 139, 141, 169, 133, 134, 160, + 171, 176, 187, 151, 155, 160, 162, 191, + 149, 158, 165, 188, 176, 190, 128, 132, + 180, 255, 133, 170, 180, 255, 128, 130, + 161, 173, 166, 179, 164, 183, 173, 144, + 146, 148, 168, 178, 180, 184, 185, 128, + 181, 187, 191, 128, 131, 179, 181, 183, + 140, 141, 144, 176, 175, 177, 191, 160, + 191, 128, 130, 170, 175, 153, 154, 153, + 154, 155, 160, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 175, 175, 178, + 180, 189, 158, 159, 176, 177, 130, 134, + 139, 163, 167, 128, 129, 180, 255, 134, + 159, 178, 255, 166, 173, 135, 147, 128, + 131, 179, 255, 129, 164, 166, 255, 169, + 182, 131, 188, 140, 141, 176, 178, 180, + 183, 184, 190, 191, 129, 171, 175, 181, + 182, 163, 170, 172, 173, 172, 184, 190, + 158, 128, 143, 160, 175, 144, 145, 150, + 155, 157, 158, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 
166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 128, 130, + 184, 255, 135, 190, 131, 175, 187, 255, + 128, 130, 167, 180, 179, 128, 130, 179, + 255, 129, 137, 141, 255, 190, 172, 183, + 159, 170, 188, 128, 131, 190, 191, 151, + 128, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 191, 128, 134, + 176, 255, 132, 255, 175, 181, 184, 255, + 129, 155, 158, 255, 129, 255, 171, 183, + 157, 171, 175, 182, 184, 191, 146, 167, + 169, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 145, 190, 143, 146, 178, 157, + 158, 133, 134, 137, 168, 169, 170, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 160, 128, 129, 132, 135, + 133, 134, 160, 255, 192, 255, 128, 131, + 157, 179, 181, 183, 164, 144, 145, 150, + 155, 157, 158, 159, 145, 146, 151, 166, + 169, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 166, 167, 169, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 168, 170, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 131, 187, 189, 132, 185, 190, + 255, 141, 144, 129, 136, 145, 151, 152, + 161, 162, 163, 164, 255, 129, 188, 190, + 130, 131, 191, 255, 141, 151, 129, 132, + 133, 134, 137, 138, 142, 161, 162, 163, + 164, 255, 131, 188, 129, 130, 190, 255, + 145, 181, 129, 130, 131, 134, 135, 136, + 137, 138, 139, 141, 142, 175, 176, 177, + 178, 255, 134, 138, 141, 129, 136, 142, + 161, 162, 163, 164, 255, 129, 188, 130, + 131, 190, 191, 128, 141, 129, 132, 135, + 136, 139, 140, 150, 151, 162, 163, 130, + 190, 191, 128, 141, 151, 129, 130, 134, + 136, 138, 140, 128, 129, 131, 190, 255, + 133, 137, 129, 132, 142, 148, 151, 161, + 164, 255, 129, 188, 190, 191, 130, 131, + 130, 134, 128, 132, 135, 136, 138, 139, + 140, 141, 149, 150, 162, 163, 129, 190, + 130, 131, 191, 255, 133, 137, 141, 151, + 129, 132, 142, 161, 162, 163, 164, 255, + 138, 143, 150, 159, 144, 145, 146, 148, + 152, 158, 178, 179, 177, 179, 180, 186, + 135, 142, 177, 179, 180, 185, 187, 188, + 136, 141, 181, 183, 185, 152, 153, 190, + 191, 191, 177, 190, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 177, 173, 183, 185, 186, 187, + 188, 189, 190, 150, 151, 152, 153, 158, + 160, 177, 180, 130, 132, 141, 157, 133, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 182, 180, 189, 190, 255, + 134, 157, 137, 147, 148, 255, 139, 141, + 169, 133, 134, 178, 160, 162, 163, 166, + 167, 168, 169, 171, 176, 184, 185, 187, + 155, 151, 152, 153, 154, 150, 160, 162, + 191, 149, 151, 152, 158, 165, 172, 173, + 178, 179, 188, 176, 190, 132, 181, 187, + 128, 131, 180, 188, 189, 255, 130, 133, + 170, 171, 179, 180, 255, 130, 161, 170, + 128, 129, 162, 165, 166, 167, 168, 173, + 167, 173, 166, 169, 170, 174, 175, 177, + 178, 179, 164, 171, 172, 179, 180, 181, + 182, 183, 161, 173, 180, 144, 146, 148, + 168, 178, 179, 184, 185, 128, 181, 187, + 191, 128, 131, 179, 181, 183, 140, 141, + 144, 176, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 
175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 167, + 163, 164, 165, 166, 132, 133, 134, 159, + 160, 177, 178, 255, 166, 173, 135, 145, + 146, 147, 131, 179, 188, 128, 130, 180, + 181, 182, 185, 186, 255, 165, 129, 255, + 169, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 131, 140, 141, 188, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 181, + 182, 172, 173, 174, 175, 165, 168, 172, + 173, 163, 170, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 129, 128, 130, + 184, 255, 135, 190, 130, 131, 175, 176, + 178, 183, 184, 187, 255, 172, 128, 130, + 167, 180, 179, 130, 128, 129, 179, 181, + 182, 190, 191, 255, 129, 137, 138, 140, + 141, 255, 180, 190, 172, 174, 175, 177, + 178, 181, 182, 183, 159, 160, 162, 163, + 170, 188, 190, 191, 128, 129, 130, 131, + 128, 151, 129, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 183, + 184, 191, 133, 128, 129, 130, 134, 176, + 185, 189, 177, 178, 179, 186, 187, 190, + 191, 255, 129, 132, 255, 175, 190, 176, + 177, 178, 181, 184, 187, 188, 255, 129, + 155, 158, 255, 189, 176, 178, 179, 186, + 187, 190, 191, 255, 129, 255, 172, 182, + 171, 173, 174, 175, 176, 183, 166, 157, + 159, 160, 161, 162, 171, 175, 190, 176, + 182, 184, 191, 169, 177, 180, 146, 167, + 170, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 143, 146, 178, 157, 158, 133, + 134, 137, 168, 169, 170, 166, 173, 165, + 169, 174, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 143, 187, 191, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 139, 168, 128, 159, 160, 175, 176, + 191, 157, 128, 191, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 
132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 141, 144, 129, + 136, 145, 151, 152, 161, 162, 163, 164, + 255, 129, 188, 190, 130, 131, 191, 255, + 141, 151, 129, 132, 133, 134, 137, 138, + 142, 161, 162, 163, 164, 255, 131, 188, + 129, 130, 190, 255, 145, 181, 129, 130, + 131, 134, 135, 136, 137, 138, 139, 141, + 142, 175, 176, 177, 178, 255, 134, 138, + 141, 129, 136, 142, 161, 162, 163, 164, + 255, 129, 188, 130, 131, 190, 191, 128, + 141, 129, 132, 135, 136, 139, 140, 150, + 151, 162, 163, 130, 190, 191, 128, 141, + 151, 129, 130, 134, 136, 138, 140, 128, + 129, 131, 190, 255, 133, 137, 129, 132, + 142, 148, 151, 161, 164, 255, 129, 188, + 190, 191, 130, 131, 130, 134, 128, 132, + 135, 136, 138, 139, 140, 141, 149, 150, + 162, 163, 129, 190, 130, 131, 191, 255, + 133, 137, 141, 151, 129, 132, 142, 161, + 162, 163, 164, 255, 138, 143, 150, 159, + 144, 145, 146, 148, 152, 158, 178, 179, + 177, 179, 180, 186, 135, 142, 177, 179, + 180, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 191, 177, 190, + 128, 132, 134, 135, 141, 151, 153, 188, + 134, 128, 129, 130, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 177, 173, + 183, 185, 186, 187, 188, 189, 190, 150, + 151, 152, 153, 158, 160, 177, 180, 130, + 132, 141, 157, 133, 134, 157, 159, 146, + 148, 178, 180, 146, 147, 
178, 179, 182, + 180, 189, 190, 255, 134, 157, 137, 147, + 148, 255, 139, 141, 169, 133, 134, 178, + 160, 162, 163, 166, 167, 168, 169, 171, + 176, 184, 185, 187, 155, 151, 152, 153, + 154, 150, 160, 162, 191, 149, 151, 152, + 158, 165, 172, 173, 178, 179, 188, 176, + 190, 132, 181, 187, 128, 131, 180, 188, + 189, 255, 130, 133, 170, 171, 179, 180, + 255, 130, 161, 170, 128, 129, 162, 165, + 166, 167, 168, 173, 167, 173, 166, 169, + 170, 174, 175, 177, 178, 179, 164, 171, + 172, 179, 180, 181, 182, 183, 161, 173, + 180, 144, 146, 148, 168, 178, 179, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 144, 176, 175, 177, + 191, 160, 191, 128, 130, 170, 175, 153, + 154, 153, 154, 155, 160, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 175, + 175, 178, 180, 189, 158, 159, 176, 177, + 130, 134, 139, 167, 163, 164, 165, 166, + 132, 133, 134, 159, 160, 177, 178, 255, + 166, 173, 135, 145, 146, 147, 131, 179, + 188, 128, 130, 180, 181, 182, 185, 186, + 255, 165, 129, 255, 169, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 131, 140, + 141, 188, 176, 178, 180, 183, 184, 190, + 191, 129, 171, 181, 182, 172, 173, 174, + 175, 165, 168, 172, 173, 163, 170, 172, + 184, 190, 158, 128, 143, 160, 175, 144, + 145, 150, 155, 157, 158, 159, 135, 139, + 141, 168, 171, 189, 160, 182, 186, 191, + 129, 131, 133, 134, 140, 143, 184, 186, + 165, 166, 128, 129, 130, 132, 133, 134, + 135, 136, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 176, + 178, 129, 128, 130, 184, 255, 135, 190, + 130, 131, 175, 176, 178, 183, 184, 187, + 255, 172, 128, 130, 167, 180, 179, 130, + 128, 129, 179, 181, 182, 190, 191, 255, + 129, 137, 138, 140, 141, 255, 180, 190, + 172, 174, 175, 177, 178, 181, 182, 183, + 159, 160, 162, 163, 170, 188, 190, 191, + 128, 129, 130, 131, 128, 151, 129, 132, + 135, 136, 139, 141, 162, 163, 166, 172, + 176, 180, 181, 183, 184, 191, 133, 128, + 129, 130, 134, 176, 185, 189, 177, 178, + 179, 186, 187, 190, 191, 255, 129, 132, + 255, 175, 190, 176, 177, 178, 181, 184, + 187, 188, 255, 129, 155, 158, 255, 189, + 176, 178, 179, 186, 187, 190, 191, 255, + 129, 255, 172, 182, 171, 173, 174, 175, + 176, 183, 166, 157, 159, 160, 161, 162, + 171, 175, 190, 176, 182, 184, 191, 169, + 177, 180, 146, 167, 170, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 166, 173, 165, 169, 174, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 143, + 187, 191, 160, 128, 129, 132, 135, 133, + 134, 160, 255, 192, 255, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 
189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 191, 128, 156, 161, 190, 192, + 255, 136, 164, 175, 176, 255, 135, 138, + 139, 187, 188, 191, 192, 255, 0, 127, + 192, 255, 187, 191, 128, 190, 191, 128, + 190, 188, 128, 175, 176, 189, 190, 191, + 145, 155, 157, 159, 128, 191, 130, 135, + 128, 191, 189, 128, 191, 128, 129, 130, + 131, 132, 191, 178, 128, 191, 128, 159, + 160, 163, 164, 191, 133, 128, 191, 128, + 178, 179, 186, 187, 191, 135, 142, 143, + 145, 146, 149, 150, 153, 154, 155, 164, + 128, 191, 128, 165, 166, 191, 128, 255, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 
172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 
+	// [machine-generated Ragel table data elided: remainder of the
+	// byte table opened before this excerpt]
+}
+
+var _graphclust_single_lengths []byte = []byte{
+	// [machine-generated table data elided]
+}
+
+var _graphclust_range_lengths []byte = []byte{
+	// [machine-generated table data elided]
+}
+
+var _graphclust_index_offsets []int16 = []int16{
+	// [machine-generated table data elided]
+}
+
+var _graphclust_indicies []int16 = []int16{
+	// [machine-generated table data elided; the table continues beyond
+	// this excerpt]
1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 
1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 
1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 
1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, +} + +var _graphclust_trans_targs []int16 = []int16{ + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 
210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 
974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, + 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 
1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, +} + 
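The `_graphclust_*` slices in this hunk are the flattened transition tables of the Ragel-generated DFA: for a transition index t, `_graphclust_trans_targs[t]` gives the next machine state, and `_graphclust_trans_actions[t]` (the table that follows) is either 0 for "no action" or an offset into the shared action list that the scan loop dispatches on. A minimal sketch of how such a table pair is consumed — a toy illustration with made-up two-entry tables, not the generated scanner itself:

    package main

    import "fmt"

    // Toy tables in the same layout Ragel emits (values invented for
    // illustration): transTargs[t] is the state reached by transition t;
    // transActions[t] is 0 for "no action" or an index into the action
    // dispatch switch.
    var transTargs = []int16{1, 0}
    var transActions = []byte{0, 7}

    func main() {
        trans := 1                   // assume the key lookup selected transition 1
        cs := int(transTargs[trans]) // move to the target state
        if act := transActions[trans]; act != 0 {
            fmt.Printf("now in state %d, dispatch action block %d\n", cs, act)
        }
    }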
+var _graphclust_trans_actions []byte = []byte{ + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _graphclust_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_eof_trans []int16 = []int16{ + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 
268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 
901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, +} + +const graphclust_start int = 1974 +const graphclust_first_final int = 1974 +const graphclust_error int = 0 + +const graphclust_en_main int = 1974 + + +// line 14 "grapheme_clusters.rl" + + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
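// ScanGraphemeClusters matches the bufio.SplitFunc signature, so it plugs
// straight into a bufio.Scanner. A minimal usage sketch (illustration only,
// not part of the generated file; assumes the vendored import path above):
//
//	package main
//
//	import (
//		"bufio"
//		"fmt"
//		"strings"
//
//		"github.com/apparentlymart/go-textseg/textseg"
//	)
//
//	func main() {
//		sc := bufio.NewScanner(strings.NewReader("héllo👍🏽"))
//		sc.Split(textseg.ScanGraphemeClusters)
//		for sc.Scan() {
//			// One grapheme cluster per token: the emoji and its
//			// skin-tone modifier come back as a single "👍🏽" token.
//			fmt.Printf("%q\n", sc.Text())
//		}
//	}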
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + +// line 4976 "grapheme_clusters.go" + { + cs = graphclust_start + ts = 0 + te = 0 + act = 0 + } + +// line 4984 "grapheme_clusters.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" + +ts = p + +// line 5008 "grapheme_clusters.go" + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" + + + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" + + + endPos = p + + case 5: +// line 1 "NONE" + +te = p+1 + + case 6: +// line 54 "grapheme_clusters.rl" + +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 
"grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" + + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 + + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" + } + } + +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" + +ts = 0 + + case 3: +// line 1 "NONE" + +act = 0 + +// line 5238 "grapheme_clusters.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: {} + } + +// line 116 "grapheme_clusters.rl" + + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl new file mode 100644 index 00000000000..003ffbf5948 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl @@ -0,0 +1,132 @@ +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT +%%{ + # (except you are actually in grapheme_clusters.rl here, so edit away!) + + machine graphclust; + write data; +}%% + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + %%{ + include GraphemeCluster "grapheme_clusters_table.rl"; + + action start { + startPos = p + } + + action end { + endPos = p + } + + action emit { + return endPos+1, data[startPos:endPos+1], nil + } + + ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; + AnyExtender = Extend | ZWJGlue | SpacingMark; + Extension = AnyExtender*; + ReplacementChar = (0xEF 0xBF 0xBD); + + CRLFSeq = CR LF; + ControlSeq = Control | ReplacementChar; + HangulSeq = ( + L+ (((LV? V+ | LVT) T*)?|LV?) | + LV V* T* | + V+ T* | + LVT T* | + T+ + ) Extension; + EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; + ZWJSeq = ZWJGlue Extension; + EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + + # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension + OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; + + # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break + PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; + + CRLFTok = CRLFSeq >start @end; + ControlTok = ControlSeq >start @end; + HangulTok = HangulSeq >start @end; + EmojiTok = EmojiSeq >start @end; + ZWJTok = ZWJSeq >start @end; + EmojiFlagTok = EmojiFlagSeq >start @end; + OtherTok = OtherSeq >start @end; + PrependTok = PrependSeq >start @end; + + main := |* + CRLFTok => emit; + ControlTok => emit; + HangulTok => emit; + EmojiTok => emit; + ZWJTok => emit; + EmojiFlagTok => emit; + PrependTok => emit; + OtherTok => emit; + + # any single valid UTF-8 character would also be valid per spec, + # but we'll handle that separately after the loop so we can deal + # with requesting more bytes if we're not at EOF. + *|; + + write init; + write exec; + }%% + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. 
+ _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl new file mode 100644 index 00000000000..fb4511825c6 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1583 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xED 0xA0 0x80..0xFF #Cs [2048] .... + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. 
+# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. + +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. 
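# A worked example with a range that straddles two of those boundaries:
#   0x700 .. 0x10000 => [0x700..0x7ff, 0x800..0xffff, 0x10000..0x10000]
# i.e. one sub-range per UTF-8 encoded length (two, three and four bytes).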
+ +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. + +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state maching to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts < 0 { + return invalidParams + } + return nil +} + +// SetBudgetLimit sets the BudgetLimit field's value. +func (s *Budget) SetBudgetLimit(v *Spend) *Budget { + s.BudgetLimit = v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *Budget) SetBudgetName(v string) *Budget { + s.BudgetName = &v + return s +} + +// SetBudgetType sets the BudgetType field's value. +func (s *Budget) SetBudgetType(v string) *Budget { + s.BudgetType = &v + return s +} + +// SetCalculatedSpend sets the CalculatedSpend field's value. 
+func (s *Budget) SetCalculatedSpend(v *CalculatedSpend) *Budget { + s.CalculatedSpend = v + return s +} + +// SetCostFilters sets the CostFilters field's value. +func (s *Budget) SetCostFilters(v map[string][]*string) *Budget { + s.CostFilters = v + return s +} + +// SetCostTypes sets the CostTypes field's value. +func (s *Budget) SetCostTypes(v *CostTypes) *Budget { + s.CostTypes = v + return s +} + +// SetTimePeriod sets the TimePeriod field's value. +func (s *Budget) SetTimePeriod(v *TimePeriod) *Budget { + s.TimePeriod = v + return s +} + +// SetTimeUnit sets the TimeUnit field's value. +func (s *Budget) SetTimeUnit(v string) *Budget { + s.TimeUnit = &v + return s +} + +// A structure that holds the actual and forecasted spend for a budget. +type CalculatedSpend struct { + _ struct{} `type:"structure"` + + // A structure that represents either a cost spend or usage spend. Contains + // an amount and a unit. + // + // ActualSpend is a required field + ActualSpend *Spend `type:"structure" required:"true"` + + // A structure that represents either a cost spend or usage spend. Contains + // an amount and a unit. + ForecastedSpend *Spend `type:"structure"` +} + +// String returns the string representation +func (s CalculatedSpend) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CalculatedSpend) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CalculatedSpend) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CalculatedSpend"} + if s.ActualSpend == nil { + invalidParams.Add(request.NewErrParamRequired("ActualSpend")) + } + if s.ActualSpend != nil { + if err := s.ActualSpend.Validate(); err != nil { + invalidParams.AddNested("ActualSpend", err.(request.ErrInvalidParams)) + } + } + if s.ForecastedSpend != nil { + if err := s.ForecastedSpend.Validate(); err != nil { + invalidParams.AddNested("ForecastedSpend", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActualSpend sets the ActualSpend field's value. +func (s *CalculatedSpend) SetActualSpend(v *Spend) *CalculatedSpend { + s.ActualSpend = v + return s +} + +// SetForecastedSpend sets the ForecastedSpend field's value. +func (s *CalculatedSpend) SetForecastedSpend(v *Spend) *CalculatedSpend { + s.ForecastedSpend = v + return s +} + +// This includes the options for getting the cost of a budget. +type CostTypes struct { + _ struct{} `type:"structure"` + + // A generic boolean value. + // + // IncludeSubscription is a required field + IncludeSubscription *bool `type:"boolean" required:"true"` + + // A generic boolean value. + // + // IncludeTax is a required field + IncludeTax *bool `type:"boolean" required:"true"` + + // A generic boolean value. + // + // UseBlended is a required field + UseBlended *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s CostTypes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CostTypes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
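// Every generated setter returns its receiver, so values compose by chaining.
// A minimal sketch, assuming this excerpt is the aws-sdk-go budgets package
// (imported as budgets) with the types defined above:
//
//	actual := (&budgets.Spend{}).SetAmount("100").SetUnit("USD")
//	calc := (&budgets.CalculatedSpend{}).SetActualSpend(actual)
//	err := calc.Validate() // nil: ActualSpend is set and itself valid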
+func (s *CostTypes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CostTypes"} + if s.IncludeSubscription == nil { + invalidParams.Add(request.NewErrParamRequired("IncludeSubscription")) + } + if s.IncludeTax == nil { + invalidParams.Add(request.NewErrParamRequired("IncludeTax")) + } + if s.UseBlended == nil { + invalidParams.Add(request.NewErrParamRequired("UseBlended")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIncludeSubscription sets the IncludeSubscription field's value. +func (s *CostTypes) SetIncludeSubscription(v bool) *CostTypes { + s.IncludeSubscription = &v + return s +} + +// SetIncludeTax sets the IncludeTax field's value. +func (s *CostTypes) SetIncludeTax(v bool) *CostTypes { + s.IncludeTax = &v + return s +} + +// SetUseBlended sets the UseBlended field's value. +func (s *CostTypes) SetUseBlended(v bool) *CostTypes { + s.UseBlended = &v + return s +} + +// Request of CreateBudget +type CreateBudgetInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // AWS Budget model + // + // Budget is a required field + Budget *Budget `type:"structure" required:"true"` + + // A list of Notifications, each with a list of subscribers. + NotificationsWithSubscribers []*NotificationWithSubscribers `type:"list"` +} + +// String returns the string representation +func (s CreateBudgetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBudgetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBudgetInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.Budget == nil { + invalidParams.Add(request.NewErrParamRequired("Budget")) + } + if s.Budget != nil { + if err := s.Budget.Validate(); err != nil { + invalidParams.AddNested("Budget", err.(request.ErrInvalidParams)) + } + } + if s.NotificationsWithSubscribers != nil { + for i, v := range s.NotificationsWithSubscribers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationsWithSubscribers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateBudgetInput) SetAccountId(v string) *CreateBudgetInput { + s.AccountId = &v + return s +} + +// SetBudget sets the Budget field's value. +func (s *CreateBudgetInput) SetBudget(v *Budget) *CreateBudgetInput { + s.Budget = v + return s +} + +// SetNotificationsWithSubscribers sets the NotificationsWithSubscribers field's value. 
+func (s *CreateBudgetInput) SetNotificationsWithSubscribers(v []*NotificationWithSubscribers) *CreateBudgetInput { + s.NotificationsWithSubscribers = v + return s +} + +// Response of CreateBudget +type CreateBudgetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateBudgetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBudgetOutput) GoString() string { + return s.String() +} + +// Request of CreateNotification +type CreateNotificationInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` + + // Notification model. Each budget may contain multiple notifications with different + // settings. + // + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` + + // A list of subscribers. + // + // Subscribers is a required field + Subscribers []*Subscriber `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNotificationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Subscribers == nil { + invalidParams.Add(request.NewErrParamRequired("Subscribers")) + } + if s.Subscribers != nil && len(s.Subscribers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + if s.Subscribers != nil { + for i, v := range s.Subscribers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateNotificationInput) SetAccountId(v string) *CreateNotificationInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *CreateNotificationInput) SetBudgetName(v string) *CreateNotificationInput { + s.BudgetName = &v + return s +} + +// SetNotification sets the Notification field's value. +func (s *CreateNotificationInput) SetNotification(v *Notification) *CreateNotificationInput { + s.Notification = v + return s +} + +// SetSubscribers sets the Subscribers field's value. 
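// Assembled end to end, a CreateNotification request looks roughly like the
// sketch below. The enum string values are assumptions; only their
// descriptions ("ACTUAL or FORECASTED", "SMS or EMAIL") appear in this diff.
//
//	input := (&budgets.CreateNotificationInput{}).
//		SetAccountId("123456789012"). // must be a 12 digit number
//		SetBudgetName("monthly-cost").
//		SetNotification((&budgets.Notification{}).
//			SetNotificationType("ACTUAL").
//			SetComparisonOperator("GREATER_THAN").
//			SetThreshold(80)).
//		SetSubscribers([]*budgets.Subscriber{
//			(&budgets.Subscriber{}).
//				SetAddress("ops@example.com").
//				SetSubscriptionType("EMAIL"),
//		})
//	err := input.Validate() // verifies required fields and nested values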
+func (s *CreateNotificationInput) SetSubscribers(v []*Subscriber) *CreateNotificationInput { + s.Subscribers = v + return s +} + +// Response of CreateNotification +type CreateNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNotificationOutput) GoString() string { + return s.String() +} + +// Request of CreateSubscriber +type CreateSubscriberInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` + + // Notification model. Each budget may contain multiple notifications with different + // settings. + // + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` + + // Subscriber model. Each notification may contain multiple subscribers with + // different addresses. + // + // Subscriber is a required field + Subscriber *Subscriber `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateSubscriberInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubscriberInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSubscriberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSubscriberInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Subscriber == nil { + invalidParams.Add(request.NewErrParamRequired("Subscriber")) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + if s.Subscriber != nil { + if err := s.Subscriber.Validate(); err != nil { + invalidParams.AddNested("Subscriber", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateSubscriberInput) SetAccountId(v string) *CreateSubscriberInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *CreateSubscriberInput) SetBudgetName(v string) *CreateSubscriberInput { + s.BudgetName = &v + return s +} + +// SetNotification sets the Notification field's value. +func (s *CreateSubscriberInput) SetNotification(v *Notification) *CreateSubscriberInput { + s.Notification = v + return s +} + +// SetSubscriber sets the Subscriber field's value. 
+func (s *CreateSubscriberInput) SetSubscriber(v *Subscriber) *CreateSubscriberInput { + s.Subscriber = v + return s +} + +// Response of CreateSubscriber +type CreateSubscriberOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateSubscriberOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubscriberOutput) GoString() string { + return s.String() +} + +// Request of DeleteBudget +type DeleteBudgetInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBudgetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBudgetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBudgetInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteBudgetInput) SetAccountId(v string) *DeleteBudgetInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DeleteBudgetInput) SetBudgetName(v string) *DeleteBudgetInput { + s.BudgetName = &v + return s +} + +// Response of DeleteBudget +type DeleteBudgetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBudgetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBudgetOutput) GoString() string { + return s.String() +} + +// Request of DeleteNotification +type DeleteNotificationInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` + + // Notification model. Each budget may contain multiple notifications with different + // settings. + // + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNotificationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteNotificationInput) SetAccountId(v string) *DeleteNotificationInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DeleteNotificationInput) SetBudgetName(v string) *DeleteNotificationInput { + s.BudgetName = &v + return s +} + +// SetNotification sets the Notification field's value. +func (s *DeleteNotificationInput) SetNotification(v *Notification) *DeleteNotificationInput { + s.Notification = v + return s +} + +// Response of DeleteNotification +type DeleteNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNotificationOutput) GoString() string { + return s.String() +} + +// Request of DeleteSubscriber +type DeleteSubscriberInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` + + // Notification model. Each budget may contain multiple notifications with different + // settings. + // + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` + + // Subscriber model. Each notification may contain multiple subscribers with + // different addresses. + // + // Subscriber is a required field + Subscriber *Subscriber `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteSubscriberInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriberInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSubscriberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriberInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Subscriber == nil { + invalidParams.Add(request.NewErrParamRequired("Subscriber")) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + if s.Subscriber != nil { + if err := s.Subscriber.Validate(); err != nil { + invalidParams.AddNested("Subscriber", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteSubscriberInput) SetAccountId(v string) *DeleteSubscriberInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DeleteSubscriberInput) SetBudgetName(v string) *DeleteSubscriberInput { + s.BudgetName = &v + return s +} + +// SetNotification sets the Notification field's value. +func (s *DeleteSubscriberInput) SetNotification(v *Notification) *DeleteSubscriberInput { + s.Notification = v + return s +} + +// SetSubscriber sets the Subscriber field's value. +func (s *DeleteSubscriberInput) SetSubscriber(v *Subscriber) *DeleteSubscriberInput { + s.Subscriber = v + return s +} + +// Response of DeleteSubscriber +type DeleteSubscriberOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSubscriberOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSubscriberOutput) GoString() string { + return s.String() +} + +// Request of DescribeBudget +type DescribeBudgetInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBudgetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBudgetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. 
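// Note the validation style shared by all of these inputs: Validate does not
// stop at the first problem but accumulates every failure into a single
// request.ErrInvalidParams. A sketch:
//
//	err := (&budgets.DeleteSubscriberInput{}).Validate()
//	// err is one ErrInvalidParams reporting AccountId, BudgetName,
//	// Notification and Subscriber as missing required fields.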
+func (s *DescribeBudgetInput) SetAccountId(v string) *DescribeBudgetInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DescribeBudgetInput) SetBudgetName(v string) *DescribeBudgetInput { + s.BudgetName = &v + return s +} + +// Response of DescribeBudget +type DescribeBudgetOutput struct { + _ struct{} `type:"structure"` + + // AWS Budget model + Budget *Budget `type:"structure"` +} + +// String returns the string representation +func (s DescribeBudgetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBudgetOutput) GoString() string { + return s.String() +} + +// SetBudget sets the Budget field's value. +func (s *DescribeBudgetOutput) SetBudget(v *Budget) *DescribeBudgetOutput { + s.Budget = v + return s +} + +// Request of DescribeBudgets +type DescribeBudgetsInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // An integer to represent how many entries a paginated response contains. Maximum + // is set to 100. + MaxResults *int64 `min:"1" type:"integer"` + + // A generic String. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeBudgetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBudgetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBudgetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DescribeBudgetsInput) SetAccountId(v string) *DescribeBudgetsInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeBudgetsInput) SetMaxResults(v int64) *DescribeBudgetsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeBudgetsInput) SetNextToken(v string) *DescribeBudgetsInput { + s.NextToken = &v + return s +} + +// Response of DescribeBudgets +type DescribeBudgetsOutput struct { + _ struct{} `type:"structure"` + + // A list of budgets + Budgets []*Budget `type:"list"` + + // A generic String. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeBudgetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBudgetsOutput) GoString() string { + return s.String() +} + +// SetBudgets sets the Budgets field's value. +func (s *DescribeBudgetsOutput) SetBudgets(v []*Budget) *DescribeBudgetsOutput { + s.Budgets = v + return s +} + +// SetNextToken sets the NextToken field's value. 
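// MaxResults and NextToken implement the usual paging contract. A sketch of
// draining every page; the service client (svc) and its DescribeBudgets
// method are assumed from the rest of the aws-sdk-go budgets package, which
// this excerpt does not show:
//
//	input := (&budgets.DescribeBudgetsInput{}).
//		SetAccountId("123456789012").
//		SetMaxResults(100) // the documented maximum
//	for {
//		out, err := svc.DescribeBudgets(input)
//		if err != nil {
//			return err
//		}
//		for _, b := range out.Budgets {
//			fmt.Println(*b.BudgetName)
//		}
//		if out.NextToken == nil {
//			break
//		}
//		input.SetNextToken(*out.NextToken)
//	}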
+func (s *DescribeBudgetsOutput) SetNextToken(v string) *DescribeBudgetsOutput { + s.NextToken = &v + return s +} + +// Request of DescribeNotificationsForBudget +type DescribeNotificationsForBudgetInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` + + // An integer to represent how many entries a paginated response contains. Maximum + // is set to 100. + MaxResults *int64 `min:"1" type:"integer"` + + // A generic String. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNotificationsForBudgetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotificationsForBudgetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNotificationsForBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNotificationsForBudgetInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DescribeNotificationsForBudgetInput) SetAccountId(v string) *DescribeNotificationsForBudgetInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DescribeNotificationsForBudgetInput) SetBudgetName(v string) *DescribeNotificationsForBudgetInput { + s.BudgetName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNotificationsForBudgetInput) SetMaxResults(v int64) *DescribeNotificationsForBudgetInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNotificationsForBudgetInput) SetNextToken(v string) *DescribeNotificationsForBudgetInput { + s.NextToken = &v + return s +} + +// Response of GetNotificationsForBudget +type DescribeNotificationsForBudgetOutput struct { + _ struct{} `type:"structure"` + + // A generic String. + NextToken *string `type:"string"` + + // A list of notifications. + Notifications []*Notification `type:"list"` +} + +// String returns the string representation +func (s DescribeNotificationsForBudgetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotificationsForBudgetOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNotificationsForBudgetOutput) SetNextToken(v string) *DescribeNotificationsForBudgetOutput { + s.NextToken = &v + return s +} + +// SetNotifications sets the Notifications field's value. 
+func (s *DescribeNotificationsForBudgetOutput) SetNotifications(v []*Notification) *DescribeNotificationsForBudgetOutput { + s.Notifications = v + return s +} + +// Request of DescribeSubscribersForNotification +type DescribeSubscribersForNotificationInput struct { + _ struct{} `type:"structure"` + + // Account Id of the customer. It should be a 12 digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string represents the budget name. No ":" and "\" character is allowed. + // + // BudgetName is a required field + BudgetName *string `type:"string" required:"true"` + + // An integer to represent how many entries a paginated response contains. Maximum + // is set to 100. + MaxResults *int64 `min:"1" type:"integer"` + + // A generic String. + NextToken *string `type:"string"` + + // Notification model. Each budget may contain multiple notifications with different + // settings. + // + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeSubscribersForNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscribersForNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSubscribersForNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSubscribersForNotificationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DescribeSubscribersForNotificationInput) SetAccountId(v string) *DescribeSubscribersForNotificationInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DescribeSubscribersForNotificationInput) SetBudgetName(v string) *DescribeSubscribersForNotificationInput { + s.BudgetName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSubscribersForNotificationInput) SetMaxResults(v int64) *DescribeSubscribersForNotificationInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscribersForNotificationInput) SetNextToken(v string) *DescribeSubscribersForNotificationInput { + s.NextToken = &v + return s +} + +// SetNotification sets the Notification field's value. 
+func (s *DescribeSubscribersForNotificationInput) SetNotification(v *Notification) *DescribeSubscribersForNotificationInput { + s.Notification = v + return s +} + +// Response of DescribeSubscribersForNotification +type DescribeSubscribersForNotificationOutput struct { + _ struct{} `type:"structure"` + + // A generic String. + NextToken *string `type:"string"` + + // A list of subscribers. + Subscribers []*Subscriber `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeSubscribersForNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscribersForNotificationOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscribersForNotificationOutput) SetNextToken(v string) *DescribeSubscribersForNotificationOutput { + s.NextToken = &v + return s +} + +// SetSubscribers sets the Subscribers field's value. +func (s *DescribeSubscribersForNotificationOutput) SetSubscribers(v []*Subscriber) *DescribeSubscribersForNotificationOutput { + s.Subscribers = v + return s +} + +// Notification model. Each budget may contain multiple notifications with different +// settings. +type Notification struct { + _ struct{} `type:"structure"` + + // The comparison operator of a notification. Currently we support less than, + // equal to and greater than. + // + // ComparisonOperator is a required field + ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` + + // The type of a notification. It should be ACTUAL or FORECASTED. + // + // NotificationType is a required field + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // The threshold of a notification. It should be a number between 0 and 1,000,000,000. + // + // Threshold is a required field + Threshold *float64 `min:"0.1" type:"double" required:"true"` + + // The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE. + ThresholdType *string `type:"string" enum:"ThresholdType"` +} + +// String returns the string representation +func (s Notification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Notification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Notification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Notification"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + if s.Threshold == nil { + invalidParams.Add(request.NewErrParamRequired("Threshold")) + } + if s.Threshold != nil && *s.Threshold < 0.1 { + invalidParams.Add(request.NewErrParamMinValue("Threshold", 0.1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *Notification) SetComparisonOperator(v string) *Notification { + s.ComparisonOperator = &v + return s +} + +// SetNotificationType sets the NotificationType field's value. +func (s *Notification) SetNotificationType(v string) *Notification { + s.NotificationType = &v + return s +} + +// SetThreshold sets the Threshold field's value. 
+func (s *Notification) SetThreshold(v float64) *Notification {
+	s.Threshold = &v
+	return s
+}
+
+// SetThresholdType sets the ThresholdType field's value.
+func (s *Notification) SetThresholdType(v string) *Notification {
+	s.ThresholdType = &v
+	return s
+}
+
+// A structure that relates a notification and a list of subscribers who belong
+// to the notification.
+type NotificationWithSubscribers struct {
+	_ struct{} `type:"structure"`
+
+	// Notification model. Each budget may contain multiple notifications with different
+	// settings.
+	//
+	// Notification is a required field
+	Notification *Notification `type:"structure" required:"true"`
+
+	// A list of subscribers.
+	//
+	// Subscribers is a required field
+	Subscribers []*Subscriber `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s NotificationWithSubscribers) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationWithSubscribers) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NotificationWithSubscribers) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "NotificationWithSubscribers"}
+	if s.Notification == nil {
+		invalidParams.Add(request.NewErrParamRequired("Notification"))
+	}
+	if s.Subscribers == nil {
+		invalidParams.Add(request.NewErrParamRequired("Subscribers"))
+	}
+	if s.Subscribers != nil && len(s.Subscribers) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1))
+	}
+	if s.Notification != nil {
+		if err := s.Notification.Validate(); err != nil {
+			invalidParams.AddNested("Notification", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Subscribers != nil {
+		for i, v := range s.Subscribers {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetNotification sets the Notification field's value.
+func (s *NotificationWithSubscribers) SetNotification(v *Notification) *NotificationWithSubscribers {
+	s.Notification = v
+	return s
+}
+
+// SetSubscribers sets the Subscribers field's value.
+func (s *NotificationWithSubscribers) SetSubscribers(v []*Subscriber) *NotificationWithSubscribers {
+	s.Subscribers = v
+	return s
+}
+
+// A structure that represents either a cost spend or usage spend. Contains
+// an amount and a unit.
+type Spend struct {
+	_ struct{} `type:"structure"`
+
+	// A string to represent NumericValue.
+	//
+	// Amount is a required field
+	Amount *string `type:"string" required:"true"`
+
+	// A string to represent the budget spend unit. It shouldn't be null or empty.
+	//
+	// Unit is a required field
+	Unit *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Spend) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Spend) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
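+//
+// For example (editor's sketch), a minimal cost spend in US dollars that
+// passes this validation:
+//
+//    spend := (&budgets.Spend{}).
+//        SetAmount("100").
+//        SetUnit("USD")
+//    if err := spend.Validate(); err != nil {
+//        // a nil Amount or an empty Unit would be reported here
+//    }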
+func (s *Spend) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Spend"} + if s.Amount == nil { + invalidParams.Add(request.NewErrParamRequired("Amount")) + } + if s.Unit == nil { + invalidParams.Add(request.NewErrParamRequired("Unit")) + } + if s.Unit != nil && len(*s.Unit) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Unit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmount sets the Amount field's value. +func (s *Spend) SetAmount(v string) *Spend { + s.Amount = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *Spend) SetUnit(v string) *Spend { + s.Unit = &v + return s +} + +// Subscriber model. Each notification may contain multiple subscribers with +// different addresses. +type Subscriber struct { + _ struct{} `type:"structure"` + + // A generic String. + // + // Address is a required field + Address *string `type:"string" required:"true"` + + // The subscription type of the subscriber. It can be SMS or EMAIL. + // + // SubscriptionType is a required field + SubscriptionType *string `type:"string" required:"true" enum:"SubscriptionType"` +} + +// String returns the string representation +func (s Subscriber) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscriber) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Subscriber) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Subscriber"} + if s.Address == nil { + invalidParams.Add(request.NewErrParamRequired("Address")) + } + if s.SubscriptionType == nil { + invalidParams.Add(request.NewErrParamRequired("SubscriptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddress sets the Address field's value. +func (s *Subscriber) SetAddress(v string) *Subscriber { + s.Address = &v + return s +} + +// SetSubscriptionType sets the SubscriptionType field's value. +func (s *Subscriber) SetSubscriptionType(v string) *Subscriber { + s.SubscriptionType = &v + return s +} + +// A time period indicating the start date and end date of a budget. +type TimePeriod struct { + _ struct{} `type:"structure"` + + // A generic timestamp. In Java it is transformed to a Date object. + // + // End is a required field + End *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // A generic timestamp. In Java it is transformed to a Date object. + // + // Start is a required field + Start *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s TimePeriod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimePeriod) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimePeriod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimePeriod"} + if s.End == nil { + invalidParams.Add(request.NewErrParamRequired("End")) + } + if s.Start == nil { + invalidParams.Add(request.NewErrParamRequired("Start")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnd sets the End field's value. +func (s *TimePeriod) SetEnd(v time.Time) *TimePeriod { + s.End = &v + return s +} + +// SetStart sets the Start field's value. 
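+//
+// Editor's sketch of building a budget period for calendar year 2018 (Parse
+// errors ignored for brevity):
+//
+//    start, _ := time.Parse(time.RFC3339, "2018-01-01T00:00:00Z")
+//    end, _ := time.Parse(time.RFC3339, "2019-01-01T00:00:00Z")
+//    period := (&budgets.TimePeriod{}).
+//        SetStart(start).
+//        SetEnd(end)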
+func (s *TimePeriod) SetStart(v time.Time) *TimePeriod {
+	s.Start = &v
+	return s
+}
+
+// Request of UpdateBudget
+type UpdateBudgetInput struct {
+	_ struct{} `type:"structure"`
+
+	// Account Id of the customer. It should be a 12 digit number.
+	//
+	// AccountId is a required field
+	AccountId *string `min:"12" type:"string" required:"true"`
+
+	// AWS Budget model
+	//
+	// NewBudget is a required field
+	NewBudget *Budget `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateBudgetInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBudgetInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateBudgetInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateBudgetInput"}
+	if s.AccountId == nil {
+		invalidParams.Add(request.NewErrParamRequired("AccountId"))
+	}
+	if s.AccountId != nil && len(*s.AccountId) < 12 {
+		invalidParams.Add(request.NewErrParamMinLen("AccountId", 12))
+	}
+	if s.NewBudget == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewBudget"))
+	}
+	if s.NewBudget != nil {
+		if err := s.NewBudget.Validate(); err != nil {
+			invalidParams.AddNested("NewBudget", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *UpdateBudgetInput) SetAccountId(v string) *UpdateBudgetInput {
+	s.AccountId = &v
+	return s
+}
+
+// SetNewBudget sets the NewBudget field's value.
+func (s *UpdateBudgetInput) SetNewBudget(v *Budget) *UpdateBudgetInput {
+	s.NewBudget = v
+	return s
+}
+
+// Response of UpdateBudget
+type UpdateBudgetOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateBudgetOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateBudgetOutput) GoString() string {
+	return s.String()
+}
+
+// Request of UpdateNotification
+type UpdateNotificationInput struct {
+	_ struct{} `type:"structure"`
+
+	// Account Id of the customer. It should be a 12 digit number.
+	//
+	// AccountId is a required field
+	AccountId *string `min:"12" type:"string" required:"true"`
+
+	// A string that represents the budget name. The ":" and "\" characters aren't allowed.
+	//
+	// BudgetName is a required field
+	BudgetName *string `type:"string" required:"true"`
+
+	// Notification model. Each budget may contain multiple notifications with different
+	// settings.
+	//
+	// NewNotification is a required field
+	NewNotification *Notification `type:"structure" required:"true"`
+
+	// Notification model. Each budget may contain multiple notifications with different
+	// settings.
+	//
+	// OldNotification is a required field
+	OldNotification *Notification `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateNotificationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateNotificationInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
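+//
+// Editor's sketch of an input that passes this validation: UpdateNotification
+// identifies the notification to change by its current settings (OldNotification)
+// and supplies the replacement (NewNotification), here raising an actual-spend
+// threshold from 80 to 90. The account ID and budget name are hypothetical.
+//
+//    old := (&budgets.Notification{}).
+//        SetNotificationType(budgets.NotificationTypeActual).
+//        SetComparisonOperator(budgets.ComparisonOperatorGreaterThan).
+//        SetThreshold(80)
+//    input := (&budgets.UpdateNotificationInput{}).
+//        SetAccountId("123456789012").
+//        SetBudgetName("monthly-budget").
+//        SetOldNotification(old).
+//        SetNewNotification((&budgets.Notification{}).
+//            SetNotificationType(budgets.NotificationTypeActual).
+//            SetComparisonOperator(budgets.ComparisonOperatorGreaterThan).
+//            SetThreshold(90))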
+func (s *UpdateNotificationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateNotificationInput"}
+	if s.AccountId == nil {
+		invalidParams.Add(request.NewErrParamRequired("AccountId"))
+	}
+	if s.AccountId != nil && len(*s.AccountId) < 12 {
+		invalidParams.Add(request.NewErrParamMinLen("AccountId", 12))
+	}
+	if s.BudgetName == nil {
+		invalidParams.Add(request.NewErrParamRequired("BudgetName"))
+	}
+	if s.NewNotification == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewNotification"))
+	}
+	if s.OldNotification == nil {
+		invalidParams.Add(request.NewErrParamRequired("OldNotification"))
+	}
+	if s.NewNotification != nil {
+		if err := s.NewNotification.Validate(); err != nil {
+			invalidParams.AddNested("NewNotification", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.OldNotification != nil {
+		if err := s.OldNotification.Validate(); err != nil {
+			invalidParams.AddNested("OldNotification", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *UpdateNotificationInput) SetAccountId(v string) *UpdateNotificationInput {
+	s.AccountId = &v
+	return s
+}
+
+// SetBudgetName sets the BudgetName field's value.
+func (s *UpdateNotificationInput) SetBudgetName(v string) *UpdateNotificationInput {
+	s.BudgetName = &v
+	return s
+}
+
+// SetNewNotification sets the NewNotification field's value.
+func (s *UpdateNotificationInput) SetNewNotification(v *Notification) *UpdateNotificationInput {
+	s.NewNotification = v
+	return s
+}
+
+// SetOldNotification sets the OldNotification field's value.
+func (s *UpdateNotificationInput) SetOldNotification(v *Notification) *UpdateNotificationInput {
+	s.OldNotification = v
+	return s
+}
+
+// Response of UpdateNotification
+type UpdateNotificationOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateNotificationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateNotificationOutput) GoString() string {
+	return s.String()
+}
+
+// Request of UpdateSubscriber
+type UpdateSubscriberInput struct {
+	_ struct{} `type:"structure"`
+
+	// Account Id of the customer. It should be a 12 digit number.
+	//
+	// AccountId is a required field
+	AccountId *string `min:"12" type:"string" required:"true"`
+
+	// A string that represents the budget name. The ":" and "\" characters aren't allowed.
+	//
+	// BudgetName is a required field
+	BudgetName *string `type:"string" required:"true"`
+
+	// Subscriber model. Each notification may contain multiple subscribers with
+	// different addresses.
+	//
+	// NewSubscriber is a required field
+	NewSubscriber *Subscriber `type:"structure" required:"true"`
+
+	// Notification model. Each budget may contain multiple notifications with different
+	// settings.
+	//
+	// Notification is a required field
+	Notification *Notification `type:"structure" required:"true"`
+
+	// Subscriber model. Each notification may contain multiple subscribers with
+	// different addresses.
+ // + // OldSubscriber is a required field + OldSubscriber *Subscriber `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateSubscriberInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSubscriberInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSubscriberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSubscriberInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.NewSubscriber == nil { + invalidParams.Add(request.NewErrParamRequired("NewSubscriber")) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.OldSubscriber == nil { + invalidParams.Add(request.NewErrParamRequired("OldSubscriber")) + } + if s.NewSubscriber != nil { + if err := s.NewSubscriber.Validate(); err != nil { + invalidParams.AddNested("NewSubscriber", err.(request.ErrInvalidParams)) + } + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + if s.OldSubscriber != nil { + if err := s.OldSubscriber.Validate(); err != nil { + invalidParams.AddNested("OldSubscriber", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *UpdateSubscriberInput) SetAccountId(v string) *UpdateSubscriberInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *UpdateSubscriberInput) SetBudgetName(v string) *UpdateSubscriberInput { + s.BudgetName = &v + return s +} + +// SetNewSubscriber sets the NewSubscriber field's value. +func (s *UpdateSubscriberInput) SetNewSubscriber(v *Subscriber) *UpdateSubscriberInput { + s.NewSubscriber = v + return s +} + +// SetNotification sets the Notification field's value. +func (s *UpdateSubscriberInput) SetNotification(v *Notification) *UpdateSubscriberInput { + s.Notification = v + return s +} + +// SetOldSubscriber sets the OldSubscriber field's value. +func (s *UpdateSubscriberInput) SetOldSubscriber(v *Subscriber) *UpdateSubscriberInput { + s.OldSubscriber = v + return s +} + +// Response of UpdateSubscriber +type UpdateSubscriberOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSubscriberOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSubscriberOutput) GoString() string { + return s.String() +} + +// The type of a budget. It should be COST, USAGE, or RI_UTILIZATION. +const ( + // BudgetTypeUsage is a BudgetType enum value + BudgetTypeUsage = "USAGE" + + // BudgetTypeCost is a BudgetType enum value + BudgetTypeCost = "COST" + + // BudgetTypeRiUtilization is a BudgetType enum value + BudgetTypeRiUtilization = "RI_UTILIZATION" +) + +// The comparison operator of a notification. Currently we support less than, +// equal to and greater than. 
+const (
+	// ComparisonOperatorGreaterThan is a ComparisonOperator enum value
+	ComparisonOperatorGreaterThan = "GREATER_THAN"
+
+	// ComparisonOperatorLessThan is a ComparisonOperator enum value
+	ComparisonOperatorLessThan = "LESS_THAN"
+
+	// ComparisonOperatorEqualTo is a ComparisonOperator enum value
+	ComparisonOperatorEqualTo = "EQUAL_TO"
+)
+
+// The type of a notification. It should be ACTUAL or FORECASTED.
+const (
+	// NotificationTypeActual is a NotificationType enum value
+	NotificationTypeActual = "ACTUAL"
+
+	// NotificationTypeForecasted is a NotificationType enum value
+	NotificationTypeForecasted = "FORECASTED"
+)
+
+// The subscription type of the subscriber. It can be SMS or EMAIL.
+const (
+	// SubscriptionTypeSns is a SubscriptionType enum value
+	SubscriptionTypeSns = "SNS"
+
+	// SubscriptionTypeEmail is a SubscriptionType enum value
+	SubscriptionTypeEmail = "EMAIL"
+)
+
+// The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE.
+const (
+	// ThresholdTypePercentage is a ThresholdType enum value
+	ThresholdTypePercentage = "PERCENTAGE"
+
+	// ThresholdTypeAbsoluteValue is a ThresholdType enum value
+	ThresholdTypeAbsoluteValue = "ABSOLUTE_VALUE"
+)
+
+// The time unit of the budget. e.g. MONTHLY, QUARTERLY, etc.
+const (
+	// TimeUnitDaily is a TimeUnit enum value
+	TimeUnitDaily = "DAILY"
+
+	// TimeUnitMonthly is a TimeUnit enum value
+	TimeUnitMonthly = "MONTHLY"
+
+	// TimeUnitQuarterly is a TimeUnit enum value
+	TimeUnitQuarterly = "QUARTERLY"
+
+	// TimeUnitAnnually is a TimeUnit enum value
+	TimeUnitAnnually = "ANNUALLY"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go
new file mode 100644
index 00000000000..84f000c4f8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package budgets provides the client and types for making API
+// requests to AWS Budgets.
+//
+// All public APIs for AWS Budgets
+//
+// See budgets package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/budgets/
+//
+// Using the Client
+//
+// To contact AWS Budgets with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Budgets client Budgets for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/budgets/#New
+package budgets
diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go
new file mode 100644
index 00000000000..8a0366d6a2e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go
@@ -0,0 +1,53 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package budgets
+
+const (
+
+	// ErrCodeCreationLimitExceededException for service response error code
+	// "CreationLimitExceededException".
+	//
+	// The exception is thrown when the customer tries to create a record (e.g.
+	// a budget), but the number of records of this type already exceeds the limit.
+	ErrCodeCreationLimitExceededException = "CreationLimitExceededException"
+
+	// ErrCodeDuplicateRecordException for service response error code
+	// "DuplicateRecordException".
+	//
+	// The exception is thrown when the customer tries to create a record (e.g.
+	// a budget) that already exists.
+	ErrCodeDuplicateRecordException = "DuplicateRecordException"
+
+	// ErrCodeExpiredNextTokenException for service response error code
+	// "ExpiredNextTokenException".
+	//
+	// This exception is thrown if the paging token is expired - past its TTL.
+	ErrCodeExpiredNextTokenException = "ExpiredNextTokenException"
+
+	// ErrCodeInternalErrorException for service response error code
+	// "InternalErrorException".
+	//
+	// This exception is thrown on an unknown internal failure.
+	ErrCodeInternalErrorException = "InternalErrorException"
+
+	// ErrCodeInvalidNextTokenException for service response error code
+	// "InvalidNextTokenException".
+	//
+	// This exception is thrown if the paging token signature didn't match the
+	// token, or the paging token isn't for this request.
+	ErrCodeInvalidNextTokenException = "InvalidNextTokenException"
+
+	// ErrCodeInvalidParameterException for service response error code
+	// "InvalidParameterException".
+	//
+	// This exception is thrown if any request is given an invalid parameter. E.g.,
+	// if a required Date field is null.
+	ErrCodeInvalidParameterException = "InvalidParameterException"
+
+	// ErrCodeNotFoundException for service response error code
+	// "NotFoundException".
+	//
+	// This exception is thrown if a requested entity is not found. E.g., if a budget
+	// ID doesn't exist for an account ID.
+	ErrCodeNotFoundException = "NotFoundException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go
new file mode 100644
index 00000000000..d74d9c96a55
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package budgets
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Budgets provides the API operation methods for making requests to
+// AWS Budgets. See this package's package overview docs
+// for details on the service.
+//
+// Budgets methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type Budgets struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "budgets"    // Service endpoint prefix API calls made to.
+	EndpointsID = ServiceName  // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the Budgets client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a Budgets client from just a session.
+// svc := budgets.New(mySession) +// +// // Create a Budgets client with additional configuration +// svc := budgets.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Budgets { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Budgets { + svc := &Budgets{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-10-20", + JSONVersion: "1.1", + TargetPrefix: "AWSBudgetServiceGateway", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Budgets operation and runs any +// custom request initialization. +func (c *Budgets) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go index 8e802dc16bf..b44abab8c9e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -90,7 +90,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCl // The argument is invalid. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/CreateCloudFrontOriginAccessIdentity func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFrontOriginAccessIdentityInput) (*CreateCloudFrontOriginAccessIdentityOutput, error) { @@ -189,7 +189,7 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // Your request contains more trusted signers than are allowed per distribution. // // * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers do not exist. +// One or more of your trusted signers don't exist. // // * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" // @@ -249,7 +249,7 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // * ErrCodeTooManyCertificates "TooManyCertificates" // You cannot create anymore custom SSL/TLS certificates. 
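// Editor's note on the budgets client added in service.go above: it follows the
// standard aws-sdk-go pattern, so it is created from a session and used like any
// other service client. A minimal sketch, assuming the generated operation method
// DescribeSubscribersForNotification that pairs with the input and output types
// shown earlier (session comes from github.com/aws/aws-sdk-go/aws/session):
//
//    sess := session.Must(session.NewSession())
//    svc := budgets.New(sess, aws.NewConfig().WithRegion("us-east-1"))
//    out, err := svc.DescribeSubscribersForNotification(input)
//    if err != nil {
//        // on failure, err can be inspected as an awserr.Error; service codes
//        // such as budgets.ErrCodeNotFoundException are defined in errors.go
//        return err
//    }
//    fmt.Println(out.Subscribers)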
@@ -383,7 +383,7 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // Your request contains more trusted signers than are allowed per distribution. // // * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers do not exist. +// One or more of your trusted signers don't exist. // // * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" // @@ -443,7 +443,7 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // * ErrCodeTooManyCertificates "TooManyCertificates" // You cannot create anymore custom SSL/TLS certificates. @@ -579,7 +579,7 @@ func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) ( // batch requests, or invalidation objects. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/CreateInvalidation func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*CreateInvalidationOutput, error) { @@ -702,7 +702,7 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // Your request contains more trusted signers than are allowed per distribution. // // * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers do not exist. +// One or more of your trusted signers don't exist. // // * ErrCodeMissingBody "MissingBody" // This operation requires a body. Ensure that the body is present and the Content-Type @@ -718,7 +718,7 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi // The argument is invalid. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/CreateStreamingDistribution func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistributionInput) (*CreateStreamingDistributionOutput, error) { @@ -814,7 +814,7 @@ func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStr // Your request contains more trusted signers than are allowed per distribution. // // * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers do not exist. +// One or more of your trusted signers don't exist. // // * ErrCodeMissingBody "MissingBody" // This operation requires a body. Ensure that the body is present and the Content-Type @@ -830,7 +830,7 @@ func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStr // The argument is invalid. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. 
//
// * ErrCodeInvalidTagging "InvalidTagging"
//
@@ -1042,6 +1042,92 @@ func (c *CloudFront) DeleteDistributionWithContext(ctx aws.Context, input *Delet
 	return out, req.Send()
 }
 
+const opDeleteServiceLinkedRole = "DeleteServiceLinkedRole2017_03_25"
+
+// DeleteServiceLinkedRoleRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteServiceLinkedRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteServiceLinkedRole for more information on using the DeleteServiceLinkedRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteServiceLinkedRoleRequest method.
+//    req, resp := client.DeleteServiceLinkedRoleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/DeleteServiceLinkedRole
+func (c *CloudFront) DeleteServiceLinkedRoleRequest(input *DeleteServiceLinkedRoleInput) (req *request.Request, output *DeleteServiceLinkedRoleOutput) {
+	op := &request.Operation{
+		Name:       opDeleteServiceLinkedRole,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/2017-03-25/service-linked-role/{RoleName}",
+	}
+
+	if input == nil {
+		input = &DeleteServiceLinkedRoleInput{}
+	}
+
+	output = &DeleteServiceLinkedRoleOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteServiceLinkedRole API operation for Amazon CloudFront.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon CloudFront's
+// API operation DeleteServiceLinkedRole for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidArgument "InvalidArgument"
+//   The argument is invalid.
+//
+//   * ErrCodeAccessDenied "AccessDenied"
+//   Access denied.
+//
+//   * ErrCodeResourceInUse "ResourceInUse"
+//
+//   * ErrCodeNoSuchResource "NoSuchResource"
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/DeleteServiceLinkedRole
+func (c *CloudFront) DeleteServiceLinkedRole(input *DeleteServiceLinkedRoleInput) (*DeleteServiceLinkedRoleOutput, error) {
+	req, out := c.DeleteServiceLinkedRoleRequest(input)
+	return out, req.Send()
+}
+
+// DeleteServiceLinkedRoleWithContext is the same as DeleteServiceLinkedRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteServiceLinkedRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
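+//
+// Editor's sketch of calling the operation with a timeout (a context.Context
+// satisfies aws.Context; svc is assumed to be a configured *cloudfront.CloudFront):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteServiceLinkedRoleWithContext(ctx, input)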
+func (c *CloudFront) DeleteServiceLinkedRoleWithContext(ctx aws.Context, input *DeleteServiceLinkedRoleInput, opts ...request.Option) (*DeleteServiceLinkedRoleOutput, error) { + req, out := c.DeleteServiceLinkedRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteStreamingDistribution = "DeleteStreamingDistribution2017_03_25" // DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the @@ -2715,7 +2801,7 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCl // The argument is invalid. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/UpdateCloudFrontOriginAccessIdentity func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFrontOriginAccessIdentityInput) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { @@ -2888,7 +2974,7 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // Your request contains more trusted signers than are allowed per distribution. // // * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers do not exist. +// One or more of your trusted signers don't exist. // // * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" // @@ -2923,7 +3009,7 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( // * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // * ErrCodeTooManyCertificates "TooManyCertificates" // You cannot create anymore custom SSL/TLS certificates. @@ -3067,10 +3153,10 @@ func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDi // Your request contains more trusted signers than are allowed per distribution. // // * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers do not exist. +// One or more of your trusted signers don't exist. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items do not match. +// The value of Quantity and the size of Items don't match. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/UpdateStreamingDistribution func (c *CloudFront) UpdateStreamingDistribution(input *UpdateStreamingDistributionInput) (*UpdateStreamingDistributionOutput, error) { @@ -4783,7 +4869,7 @@ func (s *CustomOriginConfig) SetOriginSslProtocols(v *OriginSslProtocols) *Custo return s } -// A complex type that describes the default cache behavior if you do not specify +// A complex type that describes the default cache behavior if you don't specify // a CacheBehavior element or if files don't match any of the values of PathPattern // in CacheBehavior elements. You must create exactly one default cache behavior. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/DefaultCacheBehavior @@ -5195,6 +5281,58 @@ func (s DeleteDistributionOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/DeleteServiceLinkedRoleRequest +type DeleteServiceLinkedRoleInput struct { + _ struct{} `type:"structure"` + + // RoleName is a required field + RoleName *string `location:"uri" locationName:"RoleName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceLinkedRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceLinkedRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceLinkedRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServiceLinkedRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. +func (s *DeleteServiceLinkedRoleInput) SetRoleName(v string) *DeleteServiceLinkedRoleInput { + s.RoleName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/DeleteServiceLinkedRoleOutput +type DeleteServiceLinkedRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceLinkedRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceLinkedRoleOutput) GoString() string { + return s.String() +} + // The request to delete a streaming distribution. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/DeleteStreamingDistributionRequest type DeleteStreamingDistributionInput struct { @@ -5289,7 +5427,7 @@ type Distribution struct { // DistributionConfig is a required field DistributionConfig *DistributionConfig `type:"structure" required:"true"` - // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + // The domain name corresponding to the distribution, for example, d111111abcdef8.cloudfront.net. // // DomainName is a required field DomainName *string `type:"string" required:"true"` @@ -5430,7 +5568,7 @@ type DistributionConfig struct { // in the Amazon CloudFront Developer Guide. CustomErrorResponses *CustomErrorResponses `type:"structure"` - // A complex type that describes the default cache behavior if you do not specify + // A complex type that describes the default cache behavior if you don't specify // a CacheBehavior element or if files don't match any of the values of PathPattern // in CacheBehavior elements. You must create exactly one default cache behavior. // @@ -5442,7 +5580,7 @@ type DistributionConfig struct { // instead of an object in your distribution (http://www.example.com/product-description.html). // Specifying a default root object avoids exposing the contents of your distribution. // - // Specify only the object name, for example, index.html. Do not add a / before + // Specify only the object name, for example, index.html. Don't add a / before // the object name. 
// // If you don't want to specify a default root object when you create a distribution, @@ -5490,7 +5628,7 @@ type DistributionConfig struct { // want to access your content. However, if you're using signed URLs or signed // cookies to restrict access to your content, and if you're using a custom // policy that includes the IpAddress parameter to restrict the IP addresses - // that can access your content, do not enable IPv6. If you want to restrict + // that can access your content, don't enable IPv6. If you want to restrict // access to some content by IP address and not restrict access to other content // (or restrict access but not by IP address), you can create two distributions. // For more information, see Creating a Signed URL Using a Custom Policy (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) @@ -5548,18 +5686,90 @@ type DistributionConfig struct { // A complex type that specifies the following: // - // * Which SSL/TLS certificate to use when viewers request objects using - // HTTPS + // * Whether you want viewers to use HTTP or HTTPS to request your objects. // - // * Whether you want CloudFront to use dedicated IP addresses or SNI when - // you're using alternate domain names in your object names + // * If you want viewers to use HTTPS, whether you're using an alternate + // domain name such as example.com or the CloudFront domain name for your + // distribution, such as d111111abcdef8.cloudfront.net. // - // * The minimum protocol version that you want CloudFront to use when communicating - // with viewers + // * If you're using an alternate domain name, whether AWS Certificate Manager + // (ACM) provided the certificate, or you purchased a certificate from a + // third-party certificate authority and imported it into ACM or uploaded + // it to the IAM certificate store. // - // For more information, see Using an HTTPS Connection to Access Your Objects - // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) - // in the Amazon Amazon CloudFront Developer Guide. + // You must specify only one of the following values: + // + // * ViewerCertificate$ACMCertificateArn + // + // * ViewerCertificate$IAMCertificateId + // + // * ViewerCertificate$CloudFrontDefaultCertificate + // + // Don't specify false for CloudFrontDefaultCertificate. + // + // If you want viewers to use HTTP instead of HTTPS to request your objects: + // Specify the following value: + // + // true + // + // In addition, specify allow-all for ViewerProtocolPolicy for all of your cache + // behaviors. + // + // If you want viewers to use HTTPS to request your objects: Choose the type + // of certificate that you want to use based on whether you're using an alternate + // domain name for your objects or the CloudFront domain name: + // + // * If you're using an alternate domain name, such as example.com: Specify + // one of the following values, depending on whether ACM provided your certificate + // or you purchased your certificate from third-party certificate authority: + // + // ARN for ACM SSL/TLS certificate where + // ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate + // that you want to use for this distribution. + // + // IAM certificate ID where IAM certificate + // ID is the ID that IAM returned when you added the certificate to the IAM + // certificate store. 
+ // + // If you specify ACMCertificateArn or IAMCertificateId, you must also specify + // a value for SSLSupportMethod. + // + // If you choose to use an ACM certificate or a certificate in the IAM certificate + // store, we recommend that you use only an alternate domain name in your + // object URLs (https://example.com/logo.jpg). If you use the domain name + // that is associated with your CloudFront distribution (such as https://d111111abcdef8.cloudfront.net/logo.jpg) + // and the viewer supports SNI, then CloudFront behaves normally. However, + // if the browser does not support SNI, the user's experience depends on + // the value that you choose for SSLSupportMethod: + // + // vip: The viewer displays a warning because there is a mismatch between the + // CloudFront domain name and the domain name in your SSL/TLS certificate. + // + // sni-only: CloudFront drops the connection with the browser without returning + // the object. + // + // * If you're using the CloudFront domain name for your distribution, such + // as d111111abcdef8.cloudfront.net: Specify the following value: + // + // true + // + // If you want viewers to use HTTPS, you must also specify one of the following + // values in your cache behaviors: + // + // * https-only + // + // * redirect-to-https + // + // You can also optionally require that CloudFront use HTTPS to communicate + // with your origin by specifying one of the following values for the applicable + // origins: + // + // * https-only + // + // * match-viewer + // + // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) + // in the Amazon CloudFront Developer Guide. ViewerCertificate *ViewerCertificate `type:"structure"` // A unique identifier that specifies the AWS WAF web ACL, if any, to associate @@ -5923,14 +6133,14 @@ type DistributionSummary struct { // CustomErrorResponses is a required field CustomErrorResponses *CustomErrorResponses `type:"structure" required:"true"` - // A complex type that describes the default cache behavior if you do not specify + // A complex type that describes the default cache behavior if you don't specify // a CacheBehavior element or if files don't match any of the values of PathPattern // in CacheBehavior elements. You must create exactly one default cache behavior. // // DefaultCacheBehavior is a required field DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` - // The domain name that corresponds to the distribution. For example: d604721fxaaqy9.cloudfront.net. + // The domain name that corresponds to the distribution, for example, d111111abcdef8.cloudfront.net. // // DomainName is a required field DomainName *string `type:"string" required:"true"` @@ -5985,18 +6195,90 @@ type DistributionSummary struct { // A complex type that specifies the following: // - // * Which SSL/TLS certificate to use when viewers request objects using - // HTTPS + // * Whether you want viewers to use HTTP or HTTPS to request your objects. + // + // * If you want viewers to use HTTPS, whether you're using an alternate + // domain name such as example.com or the CloudFront domain name for your + // distribution, such as d111111abcdef8.cloudfront.net. 
+ // + // * If you're using an alternate domain name, whether AWS Certificate Manager + // (ACM) provided the certificate, or you purchased a certificate from a + // third-party certificate authority and imported it into ACM or uploaded + // it to the IAM certificate store. // - // * Whether you want CloudFront to use dedicated IP addresses or SNI when - // you're using alternate domain names in your object names + // You must specify only one of the following values: // - // * The minimum protocol version that you want CloudFront to use when communicating - // with viewers + // * ViewerCertificate$ACMCertificateArn // - // For more information, see Using an HTTPS Connection to Access Your Objects - // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) - // in the Amazon Amazon CloudFront Developer Guide. + // * ViewerCertificate$IAMCertificateId + // + // * ViewerCertificate$CloudFrontDefaultCertificate + // + // Don't specify false for CloudFrontDefaultCertificate. + // + // If you want viewers to use HTTP instead of HTTPS to request your objects: + // Specify the following value: + // + // true + // + // In addition, specify allow-all for ViewerProtocolPolicy for all of your cache + // behaviors. + // + // If you want viewers to use HTTPS to request your objects: Choose the type + // of certificate that you want to use based on whether you're using an alternate + // domain name for your objects or the CloudFront domain name: + // + // * If you're using an alternate domain name, such as example.com: Specify + // one of the following values, depending on whether ACM provided your certificate + // or you purchased your certificate from third-party certificate authority: + // + // ARN for ACM SSL/TLS certificate where + // ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate + // that you want to use for this distribution. + // + // IAM certificate ID where IAM certificate + // ID is the ID that IAM returned when you added the certificate to the IAM + // certificate store. + // + // If you specify ACMCertificateArn or IAMCertificateId, you must also specify + // a value for SSLSupportMethod. + // + // If you choose to use an ACM certificate or a certificate in the IAM certificate + // store, we recommend that you use only an alternate domain name in your + // object URLs (https://example.com/logo.jpg). If you use the domain name + // that is associated with your CloudFront distribution (such as https://d111111abcdef8.cloudfront.net/logo.jpg) + // and the viewer supports SNI, then CloudFront behaves normally. However, + // if the browser does not support SNI, the user's experience depends on + // the value that you choose for SSLSupportMethod: + // + // vip: The viewer displays a warning because there is a mismatch between the + // CloudFront domain name and the domain name in your SSL/TLS certificate. + // + // sni-only: CloudFront drops the connection with the browser without returning + // the object. 
+ // + // * If you're using the CloudFront domain name for your distribution, such + // as d111111abcdef8.cloudfront.net: Specify the following value: + // + // true + // + // If you want viewers to use HTTPS, you must also specify one of the following + // values in your cache behaviors: + // + // * https-only + // + // * redirect-to-https + // + // You can also optionally require that CloudFront use HTTPS to communicate + // with your origin by specifying one of the following values for the applicable + // origins: + // + // * https-only + // + // * match-viewer + // + // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) + // in the Amazon CloudFront Developer Guide. // // ViewerCertificate is a required field ViewerCertificate *ViewerCertificate `type:"structure" required:"true"` @@ -6140,7 +6422,7 @@ type ForwardedValues struct { Cookies *CookiePreference `type:"structure" required:"true"` // A complex type that specifies the Headers, if any, that you want CloudFront - // to vary upon for this cache behavior. + // to base caching on for this cache behavior. Headers *Headers `type:"structure"` // Indicates whether you want CloudFront to forward query strings to the origin @@ -6257,7 +6539,7 @@ type GeoRestriction struct { // CloudFront and MaxMind both use ISO 3166 country codes. For the current list // of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on // the International Organization for Standardization website. You can also - // refer to the country list in the CloudFront console, which includes both + // refer to the country list on the CloudFront console, which includes both // country names and codes. Items []*string `locationNameList:"Location" type:"list"` @@ -6275,7 +6557,7 @@ type GeoRestriction struct { // restricted by client geo location. // // * blacklist: The Location elements specify the countries in which you - // do not want CloudFront to distribute your content. + // don't want CloudFront to distribute your content. // // * whitelist: The Location elements specify the countries in which you // want CloudFront to distribute your content. @@ -6855,49 +7137,54 @@ func (s *GetStreamingDistributionOutput) SetStreamingDistribution(v *StreamingDi return s } -// A complex type that specifies the headers that you want CloudFront to forward -// to the origin for this cache behavior. +// A complex type that specifies the request headers, if any, that you want +// CloudFront to base caching on for this cache behavior. // -// For the headers that you specify, CloudFront also caches separate versions -// of a specified object based on the header values in viewer requests. For -// example, suppose viewer requests for logo.jpg contain a custom Product header -// that has a value of either Acme or Apex, and you configure CloudFront to -// cache your content based on values in the Product header. CloudFront forwards -// the Product header to the origin and caches the response from the origin -// once for each header value. For more information about caching based on header +// For the headers that you specify, CloudFront caches separate versions of +// a specified object based on the header values in viewer requests. For example, +// suppose viewer requests for logo.jpg contain a custom product header that +// has a value of either acme or apex, and you configure CloudFront to cache +// your content based on values in the product header. 
CloudFront forwards the +// product header to the origin and caches the response from the origin once +// for each header value. For more information about caching based on header // values, see How CloudFront Forwards and Caches Headers (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) // in the Amazon CloudFront Developer Guide. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/Headers type Headers struct { _ struct{} `type:"structure"` - // A complex type that contains one Name element for each header that you want - // CloudFront to forward to the origin and to vary on for this cache behavior. - // If Quantity is 0, omit Items. + // A list that contains one Name element for each header that you want CloudFront + // to use for caching in this cache behavior. If Quantity is 0, omit Items. Items []*string `locationNameList:"Name" type:"list"` - // The number of different headers that you want CloudFront to forward to the - // origin for this cache behavior. You can configure each cache behavior in - // a web distribution to do one of the following: + // The number of different headers that you want CloudFront to base caching + // on for this cache behavior. You can configure each cache behavior in a web + // distribution to do one of the following: // // * Forward all headers to your origin: Specify 1 for Quantity and * for // Name. // - // If you configure CloudFront to forward all headers to your origin, CloudFront - // doesn't cache the objects associated with this cache behavior. Instead, - // it sends every request to the origin. + // CloudFront doesn't cache the objects that are associated with this cache + // behavior. Instead, CloudFront sends every request to the origin. // // * Forward a whitelist of headers you specify: Specify the number of headers - // that you want to forward, and specify the header names in Name elements. - // CloudFront caches your objects based on the values in all of the specified - // headers. CloudFront also forwards the headers that it forwards by default, - // but it caches your objects based only on the headers that you specify. - // + // that you want CloudFront to base caching on. Then specify the header names + // in Name elements. CloudFront caches your objects based on the values in + // the specified headers. // // * Forward only the default headers: Specify 0 for Quantity and omit Items. // In this configuration, CloudFront doesn't cache based on the values in // the request headers. // + // Regardless of which option you choose, CloudFront forwards headers to your + // origin based on whether the origin is an S3 bucket or a custom origin. See + // the following documentation: + // + // * S3 bucket: See HTTP Request Headers That CloudFront Removes or Updates + // (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorS3Origin.html#request-s3-removed-headers) + // + // * Custom origin: See HTTP Request Headers and CloudFront Behavior (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-headers-behavior) + // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` } @@ -7257,19 +7544,34 @@ func (s *KeyPairIds) SetQuantity(v int64) *KeyPairIds { type LambdaFunctionAssociation struct { _ struct{} `type:"structure"` - // Specifies the event type that triggers a Lambda function invocation. 
Valid - // values are: + // Specifies the event type that triggers a Lambda function invocation. You + // can specify the following values: // - // * viewer-request + // * viewer-request: The function executes when CloudFront receives a request + // from a viewer and before it checks to see whether the requested object + // is in the edge cache. // - // * origin-request + // * origin-request: The function executes only when CloudFront forwards + // a request to your origin. When the requested object is in the edge cache, + // the function doesn't execute. // - // * viewer-response + // * origin-response: The function executes after CloudFront receives a response + // from the origin and before it caches the object in the response. When + // the requested object is in the edge cache, the function doesn't execute. // - // * origin-response + // If the origin returns an HTTP status code other than HTTP 200 (OK), the function + // doesn't execute. + // + // * viewer-response: The function executes before CloudFront returns the + // requested object to the viewer. The function executes regardless of whether + // the object was already in the edge cache. + // + // If the origin returns an HTTP status code other than HTTP 200 (OK), the function + // doesn't execute. EventType *string `type:"string" enum:"EventType"` - // The ARN of the Lambda function. + // The ARN of the Lambda function. You must specify the ARN of a function version; + // you can't specify a Lambda alias or $LATEST. LambdaFunctionARN *string `type:"string"` } @@ -7800,7 +8102,7 @@ type LoggingConfig struct { Bucket *string `type:"string" required:"true"` // Specifies whether you want CloudFront to save access logs to an Amazon S3 - // bucket. If you do not want to enable logging when you create a distribution + // bucket. If you don't want to enable logging when you create a distribution // or if you want to disable logging for an existing distribution, specify false // for Enabled, and specify empty Bucket and Prefix elements. If you specify // false for Enabled but you specify values for Bucket, prefix, and IncludeCookies, @@ -7812,7 +8114,7 @@ type LoggingConfig struct { // Specifies whether you want CloudFront to include cookies in access logs, // specify true for IncludeCookies. If you choose to include cookies in logs, // CloudFront logs all cookies regardless of how you configure the cache behaviors - // for this distribution. If you do not want to include cookies when you create + // for this distribution. If you don't want to include cookies when you create // a distribution or if you want to disable include cookies for an existing // distribution, specify false for IncludeCookies. // @@ -7821,8 +8123,8 @@ type LoggingConfig struct { // An optional string that you want CloudFront to prefix to the access log filenames // for this distribution, for example, myprefix/. If you want to enable logging, - // but you do not want to specify a prefix, you still must include an empty - // Prefix element in the Logging element. + // but you don't want to specify a prefix, you still must include an empty Prefix + // element in the Logging element. // // Prefix is a required field Prefix *string `type:"string" required:"true"` @@ -7908,8 +8210,8 @@ type Origin struct { // // Constraints for Amazon S3 origins: // - // * If you configured Amazon S3 Transfer Acceleration for your bucket, do - // not specify the s3-accelerate endpoint for DomainName. 
+ // * If you configured Amazon S3 Transfer Acceleration for your bucket, don't + // specify the s3-accelerate endpoint for DomainName. // // * The bucket name must be between 3 and 63 characters long (inclusive). // @@ -8056,7 +8358,7 @@ type OriginAccessIdentity struct { // The current configuration information for the identity. CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` - // The ID for the origin access identity. For example: E74FTE3AJFJ256A. + // The ID for the origin access identity, for example, E74FTE3AJFJ256A. // // Id is a required field Id *string `type:"string" required:"true"` @@ -8835,7 +9137,7 @@ type StreamingDistribution struct { // ActiveTrustedSigners is a required field ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` - // The domain name that corresponds to the streaming distribution. For example: + // The domain name that corresponds to the streaming distribution, for example, // s5c39gqb8ow64r.cloudfront.net. // // DomainName is a required field @@ -9248,7 +9550,7 @@ type StreamingDistributionSummary struct { // Comment is a required field Comment *string `type:"string" required:"true"` - // The domain name corresponding to the distribution. For example: d604721fxaaqy9.cloudfront.net. + // The domain name corresponding to the distribution, for example, d111111abcdef8.cloudfront.net. // // DomainName is a required field DomainName *string `type:"string" required:"true"` @@ -9258,7 +9560,7 @@ type StreamingDistributionSummary struct { // Enabled is a required field Enabled *bool `type:"boolean" required:"true"` - // The identifier for the distribution. For example: EDFDVBD632BHDS5. + // The identifier for the distribution, for example, EDFDVBD632BHDS5. // // Id is a required field Id *string `type:"string" required:"true"` @@ -9387,19 +9689,19 @@ type StreamingLoggingConfig struct { Bucket *string `type:"string" required:"true"` // Specifies whether you want CloudFront to save access logs to an Amazon S3 - // bucket. If you do not want to enable logging when you create a streaming - // distribution or if you want to disable logging for an existing streaming - // distribution, specify false for Enabled, and specify empty Bucket and Prefix - // elements. If you specify false for Enabled but you specify values for Bucket - // and Prefix, the values are automatically deleted. + // bucket. If you don't want to enable logging when you create a streaming distribution + // or if you want to disable logging for an existing streaming distribution, + // specify false for Enabled, and specify empty Bucket and Prefix elements. + // If you specify false for Enabled but you specify values for Bucket and Prefix, + // the values are automatically deleted. // // Enabled is a required field Enabled *bool `type:"boolean" required:"true"` // An optional string that you want CloudFront to prefix to the access log filenames // for this streaming distribution, for example, myprefix/. If you want to enable - // logging, but you do not want to specify a prefix, you still must include - // an empty Prefix element in the Logging element. + // logging, but you don't want to specify a prefix, you still must include an + // empty Prefix element in the Logging element. 
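// A minimal sketch of the logging rule just described: enable access logs for
// a streaming distribution while supplying the required-but-empty Prefix. The
// bucket name is hypothetical:
//
//    logCfg := &cloudfront.StreamingLoggingConfig{
//        Enabled: aws.Bool(true),
//        Bucket:  aws.String("example-logs.s3.amazonaws.com"), // hypothetical
//        Prefix:  aws.String(""),                              // must be present even when empty
//    }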
// // Prefix is a required field Prefix *string `type:"string" required:"true"` @@ -10116,130 +10418,157 @@ func (s *UpdateStreamingDistributionOutput) SetStreamingDistribution(v *Streamin // A complex type that specifies the following: // -// * Which SSL/TLS certificate to use when viewers request objects using -// HTTPS +// * Whether you want viewers to use HTTP or HTTPS to request your objects. // -// * Whether you want CloudFront to use dedicated IP addresses or SNI when -// you're using alternate domain names in your object names +// * If you want viewers to use HTTPS, whether you're using an alternate +// domain name such as example.com or the CloudFront domain name for your +// distribution, such as d111111abcdef8.cloudfront.net. // -// * The minimum protocol version that you want CloudFront to use when communicating -// with viewers +// * If you're using an alternate domain name, whether AWS Certificate Manager +// (ACM) provided the certificate, or you purchased a certificate from a +// third-party certificate authority and imported it into ACM or uploaded +// it to the IAM certificate store. // -// For more information, see Using an HTTPS Connection to Access Your Objects -// (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) -// in the Amazon Amazon CloudFront Developer Guide. +// You must specify only one of the following values: +// +// * ViewerCertificate$ACMCertificateArn +// +// * ViewerCertificate$IAMCertificateId +// +// * ViewerCertificate$CloudFrontDefaultCertificate +// +// Don't specify false for CloudFrontDefaultCertificate. +// +// If you want viewers to use HTTP instead of HTTPS to request your objects: +// Specify the following value: +// +// true +// +// In addition, specify allow-all for ViewerProtocolPolicy for all of your cache +// behaviors. +// +// If you want viewers to use HTTPS to request your objects: Choose the type +// of certificate that you want to use based on whether you're using an alternate +// domain name for your objects or the CloudFront domain name: +// +// * If you're using an alternate domain name, such as example.com: Specify +// one of the following values, depending on whether ACM provided your certificate +// or you purchased your certificate from third-party certificate authority: +// +// ARN for ACM SSL/TLS certificate where +// ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate +// that you want to use for this distribution. +// +// IAM certificate ID where IAM certificate +// ID is the ID that IAM returned when you added the certificate to the IAM +// certificate store. +// +// If you specify ACMCertificateArn or IAMCertificateId, you must also specify +// a value for SSLSupportMethod. +// +// If you choose to use an ACM certificate or a certificate in the IAM certificate +// store, we recommend that you use only an alternate domain name in your +// object URLs (https://example.com/logo.jpg). If you use the domain name +// that is associated with your CloudFront distribution (such as https://d111111abcdef8.cloudfront.net/logo.jpg) +// and the viewer supports SNI, then CloudFront behaves normally. However, +// if the browser does not support SNI, the user's experience depends on +// the value that you choose for SSLSupportMethod: +// +// vip: The viewer displays a warning because there is a mismatch between the +// CloudFront domain name and the domain name in your SSL/TLS certificate. 
+// +// sni-only: CloudFront drops the connection with the browser without returning +// the object. +// +// * If you're using the CloudFront domain name for your distribution, such +// as d111111abcdef8.cloudfront.net: Specify the following value: +// +// true +// +// If you want viewers to use HTTPS, you must also specify one of the following +// values in your cache behaviors: +// +// * https-only +// +// * redirect-to-https +// +// You can also optionally require that CloudFront use HTTPS to communicate +// with your origin by specifying one of the following values for the applicable +// origins: +// +// * https-only +// +// * match-viewer +// +// For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) +// in the Amazon CloudFront Developer Guide. // Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25/ViewerCertificate type ViewerCertificate struct { _ struct{} `type:"structure"` + // For information about how and when to use ACMCertificateArn, see ViewerCertificate. ACMCertificateArn *string `type:"string"` - // Include one of these values to specify the following: + // This field has been deprecated. Use one of the following fields instead: // - // * Whether you want viewers to use HTTP or HTTPS to request your objects. + // * ViewerCertificate$ACMCertificateArn // - // * If you want viewers to use HTTPS, whether you're using an alternate - // domain name such as example.com or the CloudFront domain name for your - // distribution, such as d111111abcdef8.cloudfront.net. + // * ViewerCertificate$IAMCertificateId // - // * If you're using an alternate domain name, whether AWS Certificate Manager - // (ACM) provided the certificate, or you purchased a certificate from a - // third-party certificate authority and imported it into ACM or uploaded - // it to the IAM certificate store. - // - // You must specify one (and only one) of the three values. Do not specify false - // for CloudFrontDefaultCertificate. - // - // If you want viewers to use HTTP to request your objects: Specify the following - // value: - // - // true - // - // In addition, specify allow-all for ViewerProtocolPolicy for all of your cache - // behaviors. - // - // If you want viewers to use HTTPS to request your objects: Choose the type - // of certificate that you want to use based on whether you're using an alternate - // domain name for your objects or the CloudFront domain name: - // - // * If you're using an alternate domain name, such as example.com: Specify - // one of the following values, depending on whether ACM provided your certificate - // or you purchased your certificate from third-party certificate authority: - // - // ARN for ACM SSL/TLS certificate where - // ARN for ACM SSL/TLS certificate is the ARN for the ACM SSL/TLS certificate - // that you want to use for this distribution. - // - // IAM certificate ID where IAM certificate - // ID is the ID that IAM returned when you added the certificate to the IAM - // certificate store. - // - // If you specify ACMCertificateArn or IAMCertificateId, you must also specify - // a value for SSLSupportMethod. - // - // If you choose to use an ACM certificate or a certificate in the IAM certificate - // store, we recommend that you use only an alternate domain name in your - // object URLs (https://example.com/logo.jpg). 
If you use the domain name - // that is associated with your CloudFront distribution (https://d111111abcdef8.cloudfront.net/logo.jpg) - // and the viewer supports SNI, then CloudFront behaves normally. However, - // if the browser does not support SNI, the user's experience depends on - // the value that you choose for SSLSupportMethod: - // - // vip: The viewer displays a warning because there is a mismatch between the - // CloudFront domain name and the domain name in your SSL/TLS certificate. + // * ViewerCertificate$CloudFrontDefaultCertificate + Certificate *string `deprecated:"true" type:"string"` + + // This field has been deprecated. Use one of the following fields instead: // - // sni-only: CloudFront drops the connection with the browser without returning - // the object. + // * ViewerCertificate$ACMCertificateArn // - // * If you're using the CloudFront domain name for your distribution, such - // as d111111abcdef8.cloudfront.net: Specify the following value: + // * ViewerCertificate$IAMCertificateId // - // true + // * ViewerCertificate$CloudFrontDefaultCertificate + CertificateSource *string `deprecated:"true" type:"string" enum:"CertificateSource"` + + // For information about how and when to use CloudFrontDefaultCertificate, see + // ViewerCertificate. + CloudFrontDefaultCertificate *bool `type:"boolean"` + + // For information about how and when to use IAMCertificateId, see ViewerCertificate. + IAMCertificateId *string `type:"string"` + + // Specify the security policy that you want CloudFront to use for HTTPS connections. + // A security policy determines two settings: // - // If you want viewers to use HTTPS, you must also specify one of the following - // values in your cache behaviors: + // * The minimum SSL/TLS protocol that CloudFront uses to communicate with + // viewers // - // https-only + // * The cipher that CloudFront uses to encrypt the content that it returns + // to viewers // - // redirect-to-https + // On the CloudFront console, this setting is called Security policy. // - // You can also optionally require that CloudFront use HTTPS to communicate - // with your origin by specifying one of the following values for the applicable - // origins: + // We recommend that you specify TLSv1.1_2016 unless your users are using browsers + // or devices that do not support TLSv1.1 or later. // - // https-only + // When both of the following are true, you must specify TLSv1 or later for + // the security policy: // - // match-viewer + // * You're using a custom certificate: you specified a value for ACMCertificateArn + // or for IAMCertificateId // - // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS) - // in the Amazon CloudFront Developer Guide. - Certificate *string `deprecated:"true" type:"string"` - - // This field is deprecated. You can use one of the following: [ACMCertificateArn, - // IAMCertificateId, or CloudFrontDefaultCertificate]. - CertificateSource *string `deprecated:"true" type:"string" enum:"CertificateSource"` - - CloudFrontDefaultCertificate *bool `type:"boolean"` - - IAMCertificateId *string `type:"string"` - - // Specify the minimum version of the SSL/TLS protocol that you want CloudFront - // to use for HTTPS connections between viewers and CloudFront: SSLv3 or TLSv1. - // CloudFront serves your objects only to viewers that support SSL/TLS version - // that you specify and later versions. 
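// Pulling the new guidance together, a minimal sketch of a ViewerCertificate
// that serves HTTPS with an ACM certificate over SNI and the recommended
// TLSv1.1_2016 security policy (one of the MinimumProtocolVersion values added
// later in this changeset); the certificate ARN is hypothetical, and note that
// CloudFront only accepts ACM certificates issued in us-east-1:
//
//    vc := &cloudfront.ViewerCertificate{
//        ACMCertificateArn:      aws.String("arn:aws:acm:us-east-1:111122223333:certificate/example"), // hypothetical
//        SSLSupportMethod:       aws.String(cloudfront.SSLSupportMethodSniOnly),
//        MinimumProtocolVersion: aws.String(cloudfront.MinimumProtocolVersionTlsv112016),
//    }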
The TLSv1 protocol is more secure, so - // we recommend that you specify SSLv3 only if your users are using browsers - // or devices that don't support TLSv1. Note the following: + // * You're using SNI: you specified sni-only for SSLSupportMethod // - // * If you specify true, - // the minimum SSL protocol version is TLSv1 and can't be changed. + // If you specify true for CloudFrontDefaultCertificate, CloudFront automatically + // sets the security policy to TLSv1 regardless of the value that you specify + // for MinimumProtocolVersion. // - // * If you're using a custom certificate (if you specify a value for ACMCertificateArn - // or for IAMCertificateId) and if you're using SNI (if you specify sni-only - // for SSLSupportMethod), you must specify TLSv1 for MinimumProtocolVersion. + // For information about the relationship between the security policy that you + // choose and the protocols and ciphers that CloudFront uses to communicate + // with viewers, see Supported SSL/TLS Protocols and Ciphers for Communication + // Between Viewers and CloudFront (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html#secure-connections-supported-ciphers) + // in the Amazon CloudFront Developer Guide. MinimumProtocolVersion *string `type:"string" enum:"MinimumProtocolVersion"` - // If you specify a value for ACMCertificateArn or for IAMCertificateId, you - // must also specify how you want CloudFront to serve HTTPS requests: using + // If you specify a value for ViewerCertificate$ACMCertificateArn or for ViewerCertificate$IAMCertificateId, + // you must also specify how you want CloudFront to serve HTTPS requests: using // a method that works for all clients or one that works for most clients: // // * vip: CloudFront uses dedicated IP addresses for your content and can @@ -10262,7 +10591,7 @@ type ViewerCertificate struct { // // Use HTTP instead of HTTPS. // - // Do not specify a value for SSLSupportMethod if you specified true. + // Don't specify a value for SSLSupportMethod if you specified true. // // For more information, see Using Alternate Domain Names and HTTPS (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html#CNAMEsAndHTTPS.html) // in the Amazon CloudFront Developer Guide. @@ -10405,6 +10734,15 @@ const ( // MinimumProtocolVersionTlsv1 is a MinimumProtocolVersion enum value MinimumProtocolVersionTlsv1 = "TLSv1" + + // MinimumProtocolVersionTlsv12016 is a MinimumProtocolVersion enum value + MinimumProtocolVersionTlsv12016 = "TLSv1_2016" + + // MinimumProtocolVersionTlsv112016 is a MinimumProtocolVersion enum value + MinimumProtocolVersionTlsv112016 = "TLSv1.1_2016" + + // MinimumProtocolVersionTlsv122018 is a MinimumProtocolVersion enum value + MinimumProtocolVersionTlsv122018 = "TLSv1.2_2018" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go index 7dda612a9f8..47efb1a2fe3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go @@ -4,9 +4,9 @@ // requests to Amazon CloudFront. // // This is the Amazon CloudFront API Reference. This guide is for developers -// who need detailed information about the CloudFront API actions, data types, -// and errors. For detailed information about CloudFront features and their -// associated API calls, see the Amazon CloudFront Developer Guide. 
+// who need detailed information about CloudFront API actions, data types, and
+// errors. For detailed information about CloudFront features, see the Amazon
+// CloudFront Developer Guide.
 //
 // See https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2017-03-25 for more information on this service.
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go
index 2a38d5442c8..ed66a11e51b 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go
@@ -38,7 +38,7 @@ const (
 	// ErrCodeInconsistentQuantities for service response error code
 	// "InconsistentQuantities".
 	//
-	// The value of Quantity and the size of Items do not match.
+	// The value of Quantity and the size of Items don't match.
 	ErrCodeInconsistentQuantities = "InconsistentQuantities"
 
 	// ErrCodeInvalidArgument for service response error code
@@ -222,6 +222,10 @@ const (
 	// to false.
 	ErrCodePreconditionFailed = "PreconditionFailed"
 
+	// ErrCodeResourceInUse for service response error code
+	// "ResourceInUse".
+	ErrCodeResourceInUse = "ResourceInUse"
+
 	// ErrCodeStreamingDistributionAlreadyExists for service response error code
 	// "StreamingDistributionAlreadyExists".
 	ErrCodeStreamingDistributionAlreadyExists = "StreamingDistributionAlreadyExists"
@@ -328,6 +332,6 @@ const (
 	// ErrCodeTrustedSignerDoesNotExist for service response error code
 	// "TrustedSignerDoesNotExist".
 	//
-	// One or more of your trusted signers do not exist.
+	// One or more of your trusted signers don't exist.
 	ErrCodeTrustedSignerDoesNotExist = "TrustedSignerDoesNotExist"
 )
diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go
index ca226cdbc58..3799fa9fd5e 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go
@@ -362,6 +362,122 @@ func (c *CodeCommit) CreateRepositoryWithContext(ctx aws.Context, input *CreateR
 	return out, req.Send()
 }
 
+const opDeleteBranch = "DeleteBranch"
+
+// DeleteBranchRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBranch operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBranch for more information on using the DeleteBranch
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteBranchRequest method.
+// req, resp := client.DeleteBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranch +func (c *CodeCommit) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Request, output *DeleteBranchOutput) { + op := &request.Operation{ + Name: opDeleteBranch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBranchInput{} + } + + output = &DeleteBranchOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBranch API operation for AWS CodeCommit. +// +// Deletes a branch from a repository, unless that branch is the default branch +// for the repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation DeleteBranch for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. +// +// * ErrCodeBranchNameRequiredException "BranchNameRequiredException" +// A branch name is required but was not specified. +// +// * ErrCodeInvalidBranchNameException "InvalidBranchNameException" +// The specified branch name is not valid. +// +// * ErrCodeDefaultBranchCannotBeDeletedException "DefaultBranchCannotBeDeletedException" +// The specified branch is the default branch for the repository, and cannot +// be deleted. To delete this branch, you must first set another branch as the +// default branch. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranch +func (c *CodeCommit) DeleteBranch(input *DeleteBranchInput) (*DeleteBranchOutput, error) { + req, out := c.DeleteBranchRequest(input) + return out, req.Send() +} + +// DeleteBranchWithContext is the same as DeleteBranch with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBranch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) DeleteBranchWithContext(ctx aws.Context, input *DeleteBranchInput, opts ...request.Option) (*DeleteBranchOutput, error) { + req, out := c.DeleteBranchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteRepository = "DeleteRepository" // DeleteRepositoryRequest generates a "aws/request.Request" representing the @@ -2326,6 +2442,9 @@ type Commit struct { // the email address for the author, as configured in Git. Author *UserInfo `locationName:"author" type:"structure"` + // The full SHA of the specified commit. + CommitId *string `locationName:"commitId" type:"string"` + // Information about the person who committed the specified commit, also known // as the committer. Information includes the date in timestamp format with // GMT offset, the name of the committer, and the email address for the committer, @@ -2368,6 +2487,12 @@ func (s *Commit) SetAuthor(v *UserInfo) *Commit { return s } +// SetCommitId sets the CommitId field's value. +func (s *Commit) SetCommitId(v string) *Commit { + s.CommitId = &v + return s +} + // SetCommitter sets the Committer field's value. func (s *Commit) SetCommitter(v *UserInfo) *Commit { s.Committer = v @@ -2570,6 +2695,92 @@ func (s *CreateRepositoryOutput) SetRepositoryMetadata(v *RepositoryMetadata) *C return s } +// Represents the input of a delete branch operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranchInput +type DeleteBranchInput struct { + _ struct{} `type:"structure"` + + // The name of the branch to delete. + // + // BranchName is a required field + BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` + + // The name of the repository that contains the branch to be deleted. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBranchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBranchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBranchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBranchInput"} + if s.BranchName == nil { + invalidParams.Add(request.NewErrParamRequired("BranchName")) + } + if s.BranchName != nil && len(*s.BranchName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BranchName", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBranchName sets the BranchName field's value. +func (s *DeleteBranchInput) SetBranchName(v string) *DeleteBranchInput { + s.BranchName = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteBranchInput) SetRepositoryName(v string) *DeleteBranchInput { + s.RepositoryName = &v + return s +} + +// Represents the output of a delete branch operation. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteBranchOutput +type DeleteBranchOutput struct { + _ struct{} `type:"structure"` + + // Information about the branch deleted by the operation, including the branch + // name and the commit ID that was the tip of the branch. + DeletedBranch *BranchInfo `locationName:"deletedBranch" type:"structure"` +} + +// String returns the string representation +func (s DeleteBranchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBranchOutput) GoString() string { + return s.String() +} + +// SetDeletedBranch sets the DeletedBranch field's value. +func (s *DeleteBranchOutput) SetDeletedBranch(v *BranchInfo) *DeleteBranchOutput { + s.DeletedBranch = v + return s +} + // Represents the input of a delete repository operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteRepositoryInput type DeleteRepositoryInput struct { @@ -2851,7 +3062,7 @@ func (s *GetBranchOutput) SetBranch(v *BranchInfo) *GetBranchOutput { type GetCommitInput struct { _ struct{} `type:"structure"` - // The commit ID. + // The commit ID. Commit IDs are the full SHA of the commit. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` @@ -3628,8 +3839,10 @@ func (s *RepositoryNameIdPair) SetRepositoryName(v string) *RepositoryNameIdPair type RepositoryTrigger struct { _ struct{} `type:"structure"` - // The branches that will be included in the trigger configuration. If no branches - // are specified, the trigger will apply to all branches. + // The branches that will be included in the trigger configuration. If you specify + // an empty array, the trigger will apply to all branches. + // + // While no content is required in the array, you must include the array itself. Branches []*string `locationName:"branches" type:"list"` // Any custom data associated with the trigger that will be included in the diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go index 47357f2f4d4..15cea4e5bfa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go @@ -34,6 +34,9 @@ // // * CreateBranch, which creates a new branch in a specified repository // +// * DeleteBranch, which deletes the specified branch in a repository unless +// it is the default branch +// // * GetBranch, which returns information about a specified branch // // * ListBranches, which lists all branches for a specified repository diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go index 6200809f44d..ad28090f272 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go @@ -59,6 +59,14 @@ const ( // A commit was not specified. ErrCodeCommitRequiredException = "CommitRequiredException" + // ErrCodeDefaultBranchCannotBeDeletedException for service response error code + // "DefaultBranchCannotBeDeletedException". + // + // The specified branch is the default branch for the repository, and cannot + // be deleted. To delete this branch, you must first set another branch as the + // default branch. 
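// A minimal sketch of the new DeleteBranch operation documented above; the
// session setup is the stock v1 pattern, and the repository and branch names
// are hypothetical. Deleting the repository's default branch fails with the
// DefaultBranchCannotBeDeletedException error code defined just below.
//
//    sess := session.Must(session.NewSession()) // "github.com/aws/aws-sdk-go/aws/session"
//    svc := codecommit.New(sess)
//
//    out, err := svc.DeleteBranch(&codecommit.DeleteBranchInput{
//        RepositoryName: aws.String("demo-repo"),        // hypothetical
//        BranchName:     aws.String("feature/obsolete"), // hypothetical
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    // DeletedBranch carries the branch name and the commit ID at its tip.
//    if out.DeletedBranch != nil {
//        fmt.Println(aws.StringValue(out.DeletedBranch.CommitId))
//    }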
+	ErrCodeDefaultBranchCannotBeDeletedException = "DefaultBranchCannotBeDeletedException"
+
 	// ErrCodeEncryptionIntegrityChecksFailedException for service response error code
 	// "EncryptionIntegrityChecksFailedException".
 	//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
index a2a153b5fe6..881e0c3ef7a 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go
@@ -6864,4 +6864,7 @@ const (
 
 	// ResourceTypeAwsAutoScalingScheduledAction is a ResourceType enum value
 	ResourceTypeAwsAutoScalingScheduledAction = "AWS::AutoScaling::ScheduledAction"
+
+	// ResourceTypeAwsCodeBuildProject is a ResourceType enum value
+	ResourceTypeAwsCodeBuildProject = "AWS::CodeBuild::Project"
 )
diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go
index 1da7e791ae7..e9b5704f5a5 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go
@@ -2874,6 +2874,10 @@ func (c *DatabaseMigrationService) DescribeTableStatisticsRequest(input *Describ
 // Returns table statistics on the database migration task, including table
 // name, rows inserted, rows updated, and rows deleted.
 //
+// Note that the "last updated" column in the DMS console only indicates the
+// time that AWS DMS last updated the table statistics record for a table. It
+// does not indicate the time of the last update to the table.
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -7043,6 +7047,14 @@ func (s *DescribeSchemasOutput) SetSchemas(v []*string) *DescribeSchemasOutput {
 type DescribeTableStatisticsInput struct {
 	_ struct{} `type:"structure"`
 
+	// Filters applied to the describe table statistics action.
+	//
+	// Valid filter names: schema-name | table-name | table-state
+	//
+	// A combination of filters creates an AND condition where each record matches
+	// all specified filters.
+	Filters []*Filter `locationNameList:"Filter" type:"list"`
+
 	// An optional pagination token provided by a previous request. If this parameter
 	// is specified, the response includes only records beyond the marker, up to
 	// the value specified by MaxRecords.
@@ -7054,7 +7066,7 @@ type DescribeTableStatisticsInput struct {
 	//
 	// Default: 100
 	//
-	// Constraints: Minimum 20, maximum 100.
+	// Constraints: Minimum 20, maximum 500.
 	MaxRecords *int64 `type:"integer"`
 
 	// The Amazon Resource Name (ARN) of the replication task.
@@ -7079,6 +7091,16 @@ func (s *DescribeTableStatisticsInput) Validate() error {
 	if s.ReplicationTaskArn == nil {
 		invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn"))
 	}
+	if s.Filters != nil {
+		for i, v := range s.Filters {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
 
 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -7086,6 +7108,12 @@ func (s *DescribeTableStatisticsInput) Validate() error {
 	return nil
 }
 
+// SetFilters sets the Filters field's value.
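// Ahead of the setter below, a minimal sketch of the new Filters support on
// DescribeTableStatistics; the client and task ARN are hypothetical, and the
// filter names are limited to schema-name, table-name, and table-state, as
// documented above:
//
//    stats, err := dmsSvc.DescribeTableStatistics(&databasemigrationservice.DescribeTableStatisticsInput{
//        ReplicationTaskArn: aws.String("arn:aws:dms:us-east-1:111122223333:task:EXAMPLE"), // hypothetical
//        MaxRecords:         aws.Int64(500), // the new documented maximum
//        Filters: []*databasemigrationservice.Filter{
//            {
//                Name:   aws.String("table-state"),
//                Values: []*string{aws.String("Table completed")},
//            },
//        },
//    })
//    if err == nil {
//        fmt.Println(len(stats.TableStatistics))
//    }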
+func (s *DescribeTableStatisticsInput) SetFilters(v []*Filter) *DescribeTableStatisticsInput { + s.Filters = v + return s +} + // SetMarker sets the Marker field's value. func (s *DescribeTableStatisticsInput) SetMarker(v string) *DescribeTableStatisticsInput { s.Marker = &v @@ -7826,7 +7854,8 @@ type ModifyEndpointInput struct { // MONGODB, SYBASE, and SQLSERVER. EngineName *string `type:"string"` - // Additional attributes associated with the connection. + // Additional attributes associated with the connection. To reset this parameter, + // pass the empty string ("") as an argument. ExtraConnectionAttributes *string `type:"string"` // Settings in JSON format for the source MongoDB endpoint. For more information @@ -9921,7 +9950,11 @@ type TableStatistics struct { // The name of the table. TableName *string `type:"string"` - // The state of the table. + // The state of the tables described. + // + // Valid states: Table does not exist | Before load | Full load | Table completed + // | Table cancelled | Table error | Table all | Table updates | Table is being + // reloaded TableState *string `type:"string"` // The number of update actions performed on a table. diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go new file mode 100644 index 00000000000..48d4f5dc5a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go @@ -0,0 +1,8003 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package directconnect + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAllocateConnectionOnInterconnect = "AllocateConnectionOnInterconnect" + +// AllocateConnectionOnInterconnectRequest generates a "aws/request.Request" representing the +// client's request for the AllocateConnectionOnInterconnect operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AllocateConnectionOnInterconnect for more information on using the AllocateConnectionOnInterconnect +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AllocateConnectionOnInterconnectRequest method. 
+// req, resp := client.AllocateConnectionOnInterconnectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocateConnectionOnInterconnect +func (c *DirectConnect) AllocateConnectionOnInterconnectRequest(input *AllocateConnectionOnInterconnectInput) (req *request.Request, output *Connection) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, AllocateConnectionOnInterconnect, has been deprecated") + } + op := &request.Operation{ + Name: opAllocateConnectionOnInterconnect, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateConnectionOnInterconnectInput{} + } + + output = &Connection{} + req = c.newRequest(op, input, output) + return +} + +// AllocateConnectionOnInterconnect API operation for AWS Direct Connect. +// +// Deprecated in favor of AllocateHostedConnection. +// +// Creates a hosted connection on an interconnect. +// +// Allocates a VLAN number and a specified amount of bandwidth for use by a +// hosted connection on the given interconnect. +// +// This is intended for use by AWS Direct Connect partners only. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation AllocateConnectionOnInterconnect for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// A server-side error occurred during the API call. The error message will +// contain additional details about the cause. +// +// * ErrCodeClientException "ClientException" +// The API was called with invalid parameters. The error message will contain +// additional details about the cause. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocateConnectionOnInterconnect +func (c *DirectConnect) AllocateConnectionOnInterconnect(input *AllocateConnectionOnInterconnectInput) (*Connection, error) { + req, out := c.AllocateConnectionOnInterconnectRequest(input) + return out, req.Send() +} + +// AllocateConnectionOnInterconnectWithContext is the same as AllocateConnectionOnInterconnect with the addition of +// the ability to pass a context and additional request options. +// +// See AllocateConnectionOnInterconnect for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) AllocateConnectionOnInterconnectWithContext(ctx aws.Context, input *AllocateConnectionOnInterconnectInput, opts ...request.Option) (*Connection, error) { + req, out := c.AllocateConnectionOnInterconnectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAllocateHostedConnection = "AllocateHostedConnection" + +// AllocateHostedConnectionRequest generates a "aws/request.Request" representing the +// client's request for the AllocateHostedConnection operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AllocateHostedConnection for more information on using the AllocateHostedConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AllocateHostedConnectionRequest method. +// req, resp := client.AllocateHostedConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocateHostedConnection +func (c *DirectConnect) AllocateHostedConnectionRequest(input *AllocateHostedConnectionInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opAllocateHostedConnection, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocateHostedConnectionInput{} + } + + output = &Connection{} + req = c.newRequest(op, input, output) + return +} + +// AllocateHostedConnection API operation for AWS Direct Connect. +// +// Creates a hosted connection on an interconnect or a link aggregation group +// (LAG). +// +// Allocates a VLAN number and a specified amount of bandwidth for use by a +// hosted connection on the given interconnect or LAG. +// +// This is intended for use by AWS Direct Connect partners only. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation AllocateHostedConnection for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// A server-side error occurred during the API call. The error message will +// contain additional details about the cause. +// +// * ErrCodeClientException "ClientException" +// The API was called with invalid parameters. The error message will contain +// additional details about the cause. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocateHostedConnection +func (c *DirectConnect) AllocateHostedConnection(input *AllocateHostedConnectionInput) (*Connection, error) { + req, out := c.AllocateHostedConnectionRequest(input) + return out, req.Send() +} + +// AllocateHostedConnectionWithContext is the same as AllocateHostedConnection with the addition of +// the ability to pass a context and additional request options. +// +// See AllocateHostedConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) AllocateHostedConnectionWithContext(ctx aws.Context, input *AllocateHostedConnectionInput, opts ...request.Option) (*Connection, error) { + req, out := c.AllocateHostedConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opAllocatePrivateVirtualInterface = "AllocatePrivateVirtualInterface" + +// AllocatePrivateVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AllocatePrivateVirtualInterface operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AllocatePrivateVirtualInterface for more information on using the AllocatePrivateVirtualInterface +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AllocatePrivateVirtualInterfaceRequest method. +// req, resp := client.AllocatePrivateVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocatePrivateVirtualInterface +func (c *DirectConnect) AllocatePrivateVirtualInterfaceRequest(input *AllocatePrivateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opAllocatePrivateVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocatePrivateVirtualInterfaceInput{} + } + + output = &VirtualInterface{} + req = c.newRequest(op, input, output) + return +} + +// AllocatePrivateVirtualInterface API operation for AWS Direct Connect. +// +// Provisions a private virtual interface to be owned by another AWS customer. +// +// Virtual interfaces created using this action must be confirmed by the virtual +// interface owner by using the ConfirmPrivateVirtualInterface action. Until +// then, the virtual interface will be in 'Confirming' state, and will not be +// available for handling traffic. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation AllocatePrivateVirtualInterface for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// A server-side error occurred during the API call. The error message will +// contain additional details about the cause. +// +// * ErrCodeClientException "ClientException" +// The API was called with invalid parameters. The error message will contain +// additional details about the cause. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocatePrivateVirtualInterface +func (c *DirectConnect) AllocatePrivateVirtualInterface(input *AllocatePrivateVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.AllocatePrivateVirtualInterfaceRequest(input) + return out, req.Send() +} + +// AllocatePrivateVirtualInterfaceWithContext is the same as AllocatePrivateVirtualInterface with the addition of +// the ability to pass a context and additional request options. +// +// See AllocatePrivateVirtualInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
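// A minimal sketch of AllocatePrivateVirtualInterface as documented above;
// every identifier here is hypothetical, and per the notes above the owner
// must still call ConfirmPrivateVirtualInterface before the interface leaves
// the 'Confirming' state:
//
//    vif, err := dxSvc.AllocatePrivateVirtualInterface(&directconnect.AllocatePrivateVirtualInterfaceInput{
//        ConnectionId: aws.String("dxcon-example"), // hypothetical
//        OwnerAccount: aws.String("111122223333"),  // hypothetical account to own the interface
//        NewPrivateVirtualInterfaceAllocation: &directconnect.NewPrivateVirtualInterfaceAllocation{
//            VirtualInterfaceName: aws.String("demo-private-vif"), // hypothetical
//            Vlan:                 aws.Int64(101),
//            Asn:                  aws.Int64(65000),
//        },
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(vif.VirtualInterfaceState))
//    }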
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) AllocatePrivateVirtualInterfaceWithContext(ctx aws.Context, input *AllocatePrivateVirtualInterfaceInput, opts ...request.Option) (*VirtualInterface, error) { + req, out := c.AllocatePrivateVirtualInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAllocatePublicVirtualInterface = "AllocatePublicVirtualInterface" + +// AllocatePublicVirtualInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AllocatePublicVirtualInterface operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AllocatePublicVirtualInterface for more information on using the AllocatePublicVirtualInterface +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AllocatePublicVirtualInterfaceRequest method. +// req, resp := client.AllocatePublicVirtualInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocatePublicVirtualInterface +func (c *DirectConnect) AllocatePublicVirtualInterfaceRequest(input *AllocatePublicVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) { + op := &request.Operation{ + Name: opAllocatePublicVirtualInterface, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AllocatePublicVirtualInterfaceInput{} + } + + output = &VirtualInterface{} + req = c.newRequest(op, input, output) + return +} + +// AllocatePublicVirtualInterface API operation for AWS Direct Connect. +// +// Provisions a public virtual interface to be owned by a different customer. +// +// The owner of a connection calls this function to provision a public virtual +// interface which will be owned by another AWS customer. +// +// Virtual interfaces created using this function must be confirmed by the virtual +// interface owner by calling ConfirmPublicVirtualInterface. Until this step +// has been completed, the virtual interface will be in 'Confirming' state, +// and will not be available for handling traffic. +// +// When creating an IPv6 public virtual interface (addressFamily is 'ipv6'), +// the customer and amazon address fields should be left blank to use auto-assigned +// IPv6 space. Custom IPv6 Addresses are currently not supported. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation AllocatePublicVirtualInterface for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// A server-side error occurred during the API call. The error message will +// contain additional details about the cause. 
+// +// * ErrCodeClientException "ClientException" +// The API was called with invalid parameters. The error message will contain +// additional details about the cause. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocatePublicVirtualInterface +func (c *DirectConnect) AllocatePublicVirtualInterface(input *AllocatePublicVirtualInterfaceInput) (*VirtualInterface, error) { + req, out := c.AllocatePublicVirtualInterfaceRequest(input) + return out, req.Send() +} + +// AllocatePublicVirtualInterfaceWithContext is the same as AllocatePublicVirtualInterface with the addition of +// the ability to pass a context and additional request options. +// +// See AllocatePublicVirtualInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) AllocatePublicVirtualInterfaceWithContext(ctx aws.Context, input *AllocatePublicVirtualInterfaceInput, opts ...request.Option) (*VirtualInterface, error) { + req, out := c.AllocatePublicVirtualInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateConnectionWithLag = "AssociateConnectionWithLag" + +// AssociateConnectionWithLagRequest generates a "aws/request.Request" representing the +// client's request for the AssociateConnectionWithLag operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateConnectionWithLag for more information on using the AssociateConnectionWithLag +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateConnectionWithLagRequest method. +// req, resp := client.AssociateConnectionWithLagRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateConnectionWithLag +func (c *DirectConnect) AssociateConnectionWithLagRequest(input *AssociateConnectionWithLagInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opAssociateConnectionWithLag, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateConnectionWithLagInput{} + } + + output = &Connection{} + req = c.newRequest(op, input, output) + return +} + +// AssociateConnectionWithLag API operation for AWS Direct Connect. +// +// Associates an existing connection with a link aggregation group (LAG). The +// connection is interrupted and re-established as a member of the LAG (connectivity +// to AWS will be interrupted). The connection must be hosted on the same AWS +// Direct Connect endpoint as the LAG, and its bandwidth must match the bandwidth +// for the LAG. 
+// You can reassociate a connection that's currently associated
+// with a different LAG; however, if removing the connection would cause the
+// original LAG to fall below its setting for the minimum number of operational
+// connections, the request fails.
+//
+// Any virtual interfaces that are directly associated with the connection are
+// automatically re-associated with the LAG. If the connection was originally
+// associated with a different LAG, the virtual interfaces remain associated
+// with the original LAG.
+//
+// For interconnects, any hosted connections are automatically re-associated
+// with the LAG. If the interconnect was originally associated with a different
+// LAG, the hosted connections remain associated with the original LAG.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation AssociateConnectionWithLag for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateConnectionWithLag
+func (c *DirectConnect) AssociateConnectionWithLag(input *AssociateConnectionWithLagInput) (*Connection, error) {
+	req, out := c.AssociateConnectionWithLagRequest(input)
+	return out, req.Send()
+}
+
+// AssociateConnectionWithLagWithContext is the same as AssociateConnectionWithLag with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateConnectionWithLag for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) AssociateConnectionWithLagWithContext(ctx aws.Context, input *AssociateConnectionWithLagInput, opts ...request.Option) (*Connection, error) {
+	req, out := c.AssociateConnectionWithLagRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssociateHostedConnection = "AssociateHostedConnection"
+
+// AssociateHostedConnectionRequest generates a "aws/request.Request" representing the
+// client's request for the AssociateHostedConnection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssociateHostedConnection for more information on using the AssociateHostedConnection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the AssociateHostedConnectionRequest method.
+//    req, resp := client.AssociateHostedConnectionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateHostedConnection
+func (c *DirectConnect) AssociateHostedConnectionRequest(input *AssociateHostedConnectionInput) (req *request.Request, output *Connection) {
+	op := &request.Operation{
+		Name:       opAssociateHostedConnection,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssociateHostedConnectionInput{}
+	}
+
+	output = &Connection{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssociateHostedConnection API operation for AWS Direct Connect.
+//
+// Associates a hosted connection and its virtual interfaces with a link aggregation
+// group (LAG) or interconnect. If the target interconnect or LAG has an existing
+// hosted connection with a conflicting VLAN number or IP address, the operation
+// fails. This action temporarily interrupts the hosted connection's connectivity
+// to AWS as it is being migrated.
+//
+// This is intended for use by AWS Direct Connect partners only.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation AssociateHostedConnection for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateHostedConnection
+func (c *DirectConnect) AssociateHostedConnection(input *AssociateHostedConnectionInput) (*Connection, error) {
+	req, out := c.AssociateHostedConnectionRequest(input)
+	return out, req.Send()
+}
+
+// AssociateHostedConnectionWithContext is the same as AssociateHostedConnection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateHostedConnection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) AssociateHostedConnectionWithContext(ctx aws.Context, input *AssociateHostedConnectionInput, opts ...request.Option) (*Connection, error) {
+	req, out := c.AssociateHostedConnectionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssociateVirtualInterface = "AssociateVirtualInterface"
+
+// AssociateVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the AssociateVirtualInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssociateVirtualInterface for more information on using the AssociateVirtualInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the AssociateVirtualInterfaceRequest method.
+//    req, resp := client.AssociateVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateVirtualInterface
+func (c *DirectConnect) AssociateVirtualInterfaceRequest(input *AssociateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) {
+	op := &request.Operation{
+		Name:       opAssociateVirtualInterface,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssociateVirtualInterfaceInput{}
+	}
+
+	output = &VirtualInterface{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssociateVirtualInterface API operation for AWS Direct Connect.
+//
+// Associates a virtual interface with a specified link aggregation group (LAG)
+// or connection. Connectivity to AWS is temporarily interrupted as the virtual
+// interface is being migrated. If the target connection or LAG has an associated
+// virtual interface with a conflicting VLAN number or a conflicting IP address,
+// the operation fails.
+//
+// Virtual interfaces associated with a hosted connection cannot be associated
+// with a LAG; hosted connections must be migrated along with their virtual
+// interfaces using AssociateHostedConnection.
+//
+// Hosted virtual interfaces (an interface for which the owner of the connection
+// is not the owner of the physical connection) can only be reassociated by the
+// owner of the physical connection.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation AssociateVirtualInterface for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateVirtualInterface
+func (c *DirectConnect) AssociateVirtualInterface(input *AssociateVirtualInterfaceInput) (*VirtualInterface, error) {
+	req, out := c.AssociateVirtualInterfaceRequest(input)
+	return out, req.Send()
+}
+
+// AssociateVirtualInterfaceWithContext is the same as AssociateVirtualInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateVirtualInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
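+//
+// A minimal usage sketch (illustrative, not part of the generated docs);
+// "client" is assumed to be an initialized DirectConnect client and "params"
+// a populated AssociateVirtualInterfaceInput:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    vif, err := client.AssociateVirtualInterfaceWithContext(ctx, params)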
+func (c *DirectConnect) AssociateVirtualInterfaceWithContext(ctx aws.Context, input *AssociateVirtualInterfaceInput, opts ...request.Option) (*VirtualInterface, error) {
+	req, out := c.AssociateVirtualInterfaceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opConfirmConnection = "ConfirmConnection"
+
+// ConfirmConnectionRequest generates a "aws/request.Request" representing the
+// client's request for the ConfirmConnection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ConfirmConnection for more information on using the ConfirmConnection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the ConfirmConnectionRequest method.
+//    req, resp := client.ConfirmConnectionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmConnection
+func (c *DirectConnect) ConfirmConnectionRequest(input *ConfirmConnectionInput) (req *request.Request, output *ConfirmConnectionOutput) {
+	op := &request.Operation{
+		Name:       opConfirmConnection,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ConfirmConnectionInput{}
+	}
+
+	output = &ConfirmConnectionOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ConfirmConnection API operation for AWS Direct Connect.
+//
+// Confirms the creation of a hosted connection on an interconnect.
+//
+// Upon creation, the hosted connection is initially in the 'Ordering' state,
+// and will remain in this state until the owner calls ConfirmConnection to
+// confirm creation of the hosted connection.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation ConfirmConnection for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmConnection
+func (c *DirectConnect) ConfirmConnection(input *ConfirmConnectionInput) (*ConfirmConnectionOutput, error) {
+	req, out := c.ConfirmConnectionRequest(input)
+	return out, req.Send()
+}
+
+// ConfirmConnectionWithContext is the same as ConfirmConnection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ConfirmConnection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) ConfirmConnectionWithContext(ctx aws.Context, input *ConfirmConnectionInput, opts ...request.Option) (*ConfirmConnectionOutput, error) {
+	req, out := c.ConfirmConnectionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opConfirmPrivateVirtualInterface = "ConfirmPrivateVirtualInterface"
+
+// ConfirmPrivateVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the ConfirmPrivateVirtualInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ConfirmPrivateVirtualInterface for more information on using the ConfirmPrivateVirtualInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the ConfirmPrivateVirtualInterfaceRequest method.
+//    req, resp := client.ConfirmPrivateVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPrivateVirtualInterface
+func (c *DirectConnect) ConfirmPrivateVirtualInterfaceRequest(input *ConfirmPrivateVirtualInterfaceInput) (req *request.Request, output *ConfirmPrivateVirtualInterfaceOutput) {
+	op := &request.Operation{
+		Name:       opConfirmPrivateVirtualInterface,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ConfirmPrivateVirtualInterfaceInput{}
+	}
+
+	output = &ConfirmPrivateVirtualInterfaceOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ConfirmPrivateVirtualInterface API operation for AWS Direct Connect.
+//
+// Accepts ownership of a private virtual interface created by another customer.
+//
+// After the virtual interface owner calls this function, the virtual interface
+// will be created and attached to the given virtual private gateway, and will
+// be available for handling traffic.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation ConfirmPrivateVirtualInterface for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPrivateVirtualInterface
+func (c *DirectConnect) ConfirmPrivateVirtualInterface(input *ConfirmPrivateVirtualInterfaceInput) (*ConfirmPrivateVirtualInterfaceOutput, error) {
+	req, out := c.ConfirmPrivateVirtualInterfaceRequest(input)
+	return out, req.Send()
+}
+
+// ConfirmPrivateVirtualInterfaceWithContext is the same as ConfirmPrivateVirtualInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ConfirmPrivateVirtualInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) ConfirmPrivateVirtualInterfaceWithContext(ctx aws.Context, input *ConfirmPrivateVirtualInterfaceInput, opts ...request.Option) (*ConfirmPrivateVirtualInterfaceOutput, error) {
+	req, out := c.ConfirmPrivateVirtualInterfaceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opConfirmPublicVirtualInterface = "ConfirmPublicVirtualInterface"
+
+// ConfirmPublicVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the ConfirmPublicVirtualInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ConfirmPublicVirtualInterface for more information on using the ConfirmPublicVirtualInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the ConfirmPublicVirtualInterfaceRequest method.
+//    req, resp := client.ConfirmPublicVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPublicVirtualInterface
+func (c *DirectConnect) ConfirmPublicVirtualInterfaceRequest(input *ConfirmPublicVirtualInterfaceInput) (req *request.Request, output *ConfirmPublicVirtualInterfaceOutput) {
+	op := &request.Operation{
+		Name:       opConfirmPublicVirtualInterface,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ConfirmPublicVirtualInterfaceInput{}
+	}
+
+	output = &ConfirmPublicVirtualInterfaceOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ConfirmPublicVirtualInterface API operation for AWS Direct Connect.
+//
+// Accepts ownership of a public virtual interface created by another customer.
+//
+// After the virtual interface owner calls this function, the specified virtual
+// interface will be created and made available for handling traffic.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
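+//
+// For instance (an illustrative sketch, not part of the generated docs), the
+// returned error can be inspected like this:
+//
+//    out, err := client.ConfirmPublicVirtualInterface(params)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    }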
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation ConfirmPublicVirtualInterface for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPublicVirtualInterface
+func (c *DirectConnect) ConfirmPublicVirtualInterface(input *ConfirmPublicVirtualInterfaceInput) (*ConfirmPublicVirtualInterfaceOutput, error) {
+	req, out := c.ConfirmPublicVirtualInterfaceRequest(input)
+	return out, req.Send()
+}
+
+// ConfirmPublicVirtualInterfaceWithContext is the same as ConfirmPublicVirtualInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ConfirmPublicVirtualInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) ConfirmPublicVirtualInterfaceWithContext(ctx aws.Context, input *ConfirmPublicVirtualInterfaceInput, opts ...request.Option) (*ConfirmPublicVirtualInterfaceOutput, error) {
+	req, out := c.ConfirmPublicVirtualInterfaceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateBGPPeer = "CreateBGPPeer"
+
+// CreateBGPPeerRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBGPPeer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateBGPPeer for more information on using the CreateBGPPeer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreateBGPPeerRequest method.
+//    req, resp := client.CreateBGPPeerRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateBGPPeer
+func (c *DirectConnect) CreateBGPPeerRequest(input *CreateBGPPeerInput) (req *request.Request, output *CreateBGPPeerOutput) {
+	op := &request.Operation{
+		Name:       opCreateBGPPeer,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateBGPPeerInput{}
+	}
+
+	output = &CreateBGPPeerOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateBGPPeer API operation for AWS Direct Connect.
+//
+// Creates a new BGP peer on a specified virtual interface. The BGP peer cannot
+// be in the same address family (IPv4/IPv6) as an existing BGP peer on the
+// virtual interface.
+//
+// You must create a BGP peer for the corresponding address family in order
+// to access AWS resources that also use that address family.
+//
+// When creating an IPv6 BGP peer, the Amazon address and customer address fields
+// must be left blank. IPv6 addresses are automatically assigned from Amazon's
+// pool of IPv6 addresses; you cannot specify custom IPv6 addresses.
+//
+// For a public virtual interface, the Autonomous System Number (ASN) must be
+// private or already whitelisted for the virtual interface.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation CreateBGPPeer for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateBGPPeer
+func (c *DirectConnect) CreateBGPPeer(input *CreateBGPPeerInput) (*CreateBGPPeerOutput, error) {
+	req, out := c.CreateBGPPeerRequest(input)
+	return out, req.Send()
+}
+
+// CreateBGPPeerWithContext is the same as CreateBGPPeer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBGPPeer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) CreateBGPPeerWithContext(ctx aws.Context, input *CreateBGPPeerInput, opts ...request.Option) (*CreateBGPPeerOutput, error) {
+	req, out := c.CreateBGPPeerRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateConnection = "CreateConnection"
+
+// CreateConnectionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateConnection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateConnection for more information on using the CreateConnection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreateConnectionRequest method.
+//    req, resp := client.CreateConnectionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateConnection
+func (c *DirectConnect) CreateConnectionRequest(input *CreateConnectionInput) (req *request.Request, output *Connection) {
+	op := &request.Operation{
+		Name:       opCreateConnection,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateConnectionInput{}
+	}
+
+	output = &Connection{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateConnection API operation for AWS Direct Connect.
+//
+// Creates a new connection between the customer network and a specific AWS
+// Direct Connect location.
+//
+// A connection links your internal network to an AWS Direct Connect location
+// over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end
+// of the cable is connected to your router, the other to an AWS Direct Connect
+// router. An AWS Direct Connect location provides access to Amazon Web Services
+// in the region it is associated with. You can establish connections with AWS
+// Direct Connect locations in multiple regions, but a connection in one region
+// does not provide connectivity to other regions.
+//
+// You can automatically add the new connection to a link aggregation group
+// (LAG) by specifying a LAG ID in the request. This ensures that the new connection
+// is allocated on the same AWS Direct Connect endpoint that hosts the specified
+// LAG. If there are no available ports on the endpoint, the request fails and
+// no connection will be created.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation CreateConnection for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateConnection
+func (c *DirectConnect) CreateConnection(input *CreateConnectionInput) (*Connection, error) {
+	req, out := c.CreateConnectionRequest(input)
+	return out, req.Send()
+}
+
+// CreateConnectionWithContext is the same as CreateConnection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateConnection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) CreateConnectionWithContext(ctx aws.Context, input *CreateConnectionInput, opts ...request.Option) (*Connection, error) {
+	req, out := c.CreateConnectionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
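+	// The variadic options are applied to this request only; they do not
+	// change the client's default configuration.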
+	return out, req.Send()
+}
+
+const opCreateInterconnect = "CreateInterconnect"
+
+// CreateInterconnectRequest generates a "aws/request.Request" representing the
+// client's request for the CreateInterconnect operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateInterconnect for more information on using the CreateInterconnect
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreateInterconnectRequest method.
+//    req, resp := client.CreateInterconnectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateInterconnect
+func (c *DirectConnect) CreateInterconnectRequest(input *CreateInterconnectInput) (req *request.Request, output *Interconnect) {
+	op := &request.Operation{
+		Name:       opCreateInterconnect,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateInterconnectInput{}
+	}
+
+	output = &Interconnect{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateInterconnect API operation for AWS Direct Connect.
+//
+// Creates a new interconnect between an AWS Direct Connect partner's network
+// and a specific AWS Direct Connect location.
+//
+// An interconnect is a connection that is capable of hosting other connections.
+// The AWS Direct Connect partner can use an interconnect to provide sub-1Gbps
+// AWS Direct Connect service to tier 2 customers who do not have their own
+// connections. Like a standard connection, an interconnect links the AWS Direct
+// Connect partner's network to an AWS Direct Connect location over a standard
+// 1 Gbps or 10 Gbps Ethernet fiber-optic cable. One end is connected to the
+// partner's router, the other to an AWS Direct Connect router.
+//
+// You can automatically add the new interconnect to a link aggregation group
+// (LAG) by specifying a LAG ID in the request. This ensures that the new interconnect
+// is allocated on the same AWS Direct Connect endpoint that hosts the specified
+// LAG. If there are no available ports on the endpoint, the request fails and
+// no interconnect will be created.
+//
+// For each end customer, the AWS Direct Connect partner provisions a connection
+// on their interconnect by calling AllocateConnectionOnInterconnect. The end
+// customer can then connect to AWS resources by creating a virtual interface
+// on their connection, using the VLAN assigned to them by the AWS Direct Connect
+// partner.
+//
+// This is intended for use by AWS Direct Connect partners only.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation CreateInterconnect for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateInterconnect
+func (c *DirectConnect) CreateInterconnect(input *CreateInterconnectInput) (*Interconnect, error) {
+	req, out := c.CreateInterconnectRequest(input)
+	return out, req.Send()
+}
+
+// CreateInterconnectWithContext is the same as CreateInterconnect with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateInterconnect for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) CreateInterconnectWithContext(ctx aws.Context, input *CreateInterconnectInput, opts ...request.Option) (*Interconnect, error) {
+	req, out := c.CreateInterconnectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreateLag = "CreateLag"
+
+// CreateLagRequest generates a "aws/request.Request" representing the
+// client's request for the CreateLag operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateLag for more information on using the CreateLag
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreateLagRequest method.
+//    req, resp := client.CreateLagRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateLag
+func (c *DirectConnect) CreateLagRequest(input *CreateLagInput) (req *request.Request, output *Lag) {
+	op := &request.Operation{
+		Name:       opCreateLag,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateLagInput{}
+	}
+
+	output = &Lag{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateLag API operation for AWS Direct Connect.
+//
+// Creates a new link aggregation group (LAG) with the specified number of bundled
+// physical connections between the customer network and a specific AWS Direct
+// Connect location. A LAG is a logical interface that uses the Link Aggregation
+// Control Protocol (LACP) to aggregate multiple 1 gigabit or 10 gigabit interfaces,
+// allowing you to treat them as a single interface.
+//
+// All connections in a LAG must use the same bandwidth (for example, 10 Gbps),
+// and must terminate at the same AWS Direct Connect endpoint.
+//
+// You can have up to 10 connections per LAG. Regardless of this limit, if you
+// request more connections for the LAG than AWS Direct Connect can allocate
+// on a single endpoint, no LAG is created.
+//
+// You can specify an existing physical connection or interconnect to include
+// in the LAG (which counts towards the total number of connections). Doing
+// so interrupts the current physical connection or hosted connections, and
+// re-establishes them as a member of the LAG. The LAG will be created on the
+// same AWS Direct Connect endpoint at which the connection terminates. Any
+// virtual interfaces associated with the connection are automatically disassociated
+// and re-associated with the LAG. The connection ID does not change.
+//
+// If the AWS account used to create a LAG is a registered AWS Direct Connect
+// partner, the LAG is automatically enabled to host sub-connections. For a
+// LAG owned by a partner, any associated virtual interfaces cannot be directly
+// configured.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation CreateLag for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateLag
+func (c *DirectConnect) CreateLag(input *CreateLagInput) (*Lag, error) {
+	req, out := c.CreateLagRequest(input)
+	return out, req.Send()
+}
+
+// CreateLagWithContext is the same as CreateLag with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateLag for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) CreateLagWithContext(ctx aws.Context, input *CreateLagInput, opts ...request.Option) (*Lag, error) {
+	req, out := c.CreateLagRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreatePrivateVirtualInterface = "CreatePrivateVirtualInterface"
+
+// CreatePrivateVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the CreatePrivateVirtualInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreatePrivateVirtualInterface for more information on using the CreatePrivateVirtualInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreatePrivateVirtualInterfaceRequest method.
+//    req, resp := client.CreatePrivateVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreatePrivateVirtualInterface
+func (c *DirectConnect) CreatePrivateVirtualInterfaceRequest(input *CreatePrivateVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) {
+	op := &request.Operation{
+		Name:       opCreatePrivateVirtualInterface,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreatePrivateVirtualInterfaceInput{}
+	}
+
+	output = &VirtualInterface{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreatePrivateVirtualInterface API operation for AWS Direct Connect.
+//
+// Creates a new private virtual interface. A virtual interface is the VLAN
+// that transports AWS Direct Connect traffic. A private virtual interface supports
+// sending traffic to a single virtual private cloud (VPC).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation CreatePrivateVirtualInterface for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreatePrivateVirtualInterface
+func (c *DirectConnect) CreatePrivateVirtualInterface(input *CreatePrivateVirtualInterfaceInput) (*VirtualInterface, error) {
+	req, out := c.CreatePrivateVirtualInterfaceRequest(input)
+	return out, req.Send()
+}
+
+// CreatePrivateVirtualInterfaceWithContext is the same as CreatePrivateVirtualInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreatePrivateVirtualInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) CreatePrivateVirtualInterfaceWithContext(ctx aws.Context, input *CreatePrivateVirtualInterfaceInput, opts ...request.Option) (*VirtualInterface, error) {
+	req, out := c.CreatePrivateVirtualInterfaceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opCreatePublicVirtualInterface = "CreatePublicVirtualInterface"
+
+// CreatePublicVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the CreatePublicVirtualInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
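+//
+// As an illustrative sketch (the header name below is hypothetical), the
+// returned request can be customized before it is sent:
+//
+//    req, vif := client.CreatePublicVirtualInterfaceRequest(params)
+//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
+//    err := req.Send()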
+//
+// See CreatePublicVirtualInterface for more information on using the CreatePublicVirtualInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreatePublicVirtualInterfaceRequest method.
+//    req, resp := client.CreatePublicVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreatePublicVirtualInterface
+func (c *DirectConnect) CreatePublicVirtualInterfaceRequest(input *CreatePublicVirtualInterfaceInput) (req *request.Request, output *VirtualInterface) {
+	op := &request.Operation{
+		Name:       opCreatePublicVirtualInterface,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreatePublicVirtualInterfaceInput{}
+	}
+
+	output = &VirtualInterface{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreatePublicVirtualInterface API operation for AWS Direct Connect.
+//
+// Creates a new public virtual interface. A virtual interface is the VLAN that
+// transports AWS Direct Connect traffic. A public virtual interface supports
+// sending traffic to public services of AWS such as Amazon Simple Storage Service
+// (Amazon S3).
+//
+// When creating an IPv6 public virtual interface (addressFamily is 'ipv6'),
+// the customer and amazon address fields should be left blank to use auto-assigned
+// IPv6 space. Custom IPv6 addresses are currently not supported.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation CreatePublicVirtualInterface for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreatePublicVirtualInterface
+func (c *DirectConnect) CreatePublicVirtualInterface(input *CreatePublicVirtualInterfaceInput) (*VirtualInterface, error) {
+	req, out := c.CreatePublicVirtualInterfaceRequest(input)
+	return out, req.Send()
+}
+
+// CreatePublicVirtualInterfaceWithContext is the same as CreatePublicVirtualInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreatePublicVirtualInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) CreatePublicVirtualInterfaceWithContext(ctx aws.Context, input *CreatePublicVirtualInterfaceInput, opts ...request.Option) (*VirtualInterface, error) {
+	req, out := c.CreatePublicVirtualInterfaceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteBGPPeer = "DeleteBGPPeer"
+
+// DeleteBGPPeerRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBGPPeer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBGPPeer for more information on using the DeleteBGPPeer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteBGPPeerRequest method.
+//    req, resp := client.DeleteBGPPeerRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteBGPPeer
+func (c *DirectConnect) DeleteBGPPeerRequest(input *DeleteBGPPeerInput) (req *request.Request, output *DeleteBGPPeerOutput) {
+	op := &request.Operation{
+		Name:       opDeleteBGPPeer,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteBGPPeerInput{}
+	}
+
+	output = &DeleteBGPPeerOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteBGPPeer API operation for AWS Direct Connect.
+//
+// Deletes a BGP peer on the specified virtual interface that matches the specified
+// customer address and ASN. You cannot delete the last BGP peer from a virtual
+// interface.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DeleteBGPPeer for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteBGPPeer
+func (c *DirectConnect) DeleteBGPPeer(input *DeleteBGPPeerInput) (*DeleteBGPPeerOutput, error) {
+	req, out := c.DeleteBGPPeerRequest(input)
+	return out, req.Send()
+}
+
+// DeleteBGPPeerWithContext is the same as DeleteBGPPeer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBGPPeer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DeleteBGPPeerWithContext(ctx aws.Context, input *DeleteBGPPeerInput, opts ...request.Option) (*DeleteBGPPeerOutput, error) {
+	req, out := c.DeleteBGPPeerRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteConnection = "DeleteConnection"
+
+// DeleteConnectionRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteConnection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteConnection for more information on using the DeleteConnection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteConnectionRequest method.
+//    req, resp := client.DeleteConnectionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteConnection
+func (c *DirectConnect) DeleteConnectionRequest(input *DeleteConnectionInput) (req *request.Request, output *Connection) {
+	op := &request.Operation{
+		Name:       opDeleteConnection,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteConnectionInput{}
+	}
+
+	output = &Connection{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteConnection API operation for AWS Direct Connect.
+//
+// Deletes the connection.
+//
+// Deleting a connection only stops the AWS Direct Connect port hour and data
+// transfer charges. You need to cancel separately with the providers any services
+// or charges for cross-connects or network circuits that connect you to the
+// AWS Direct Connect location.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DeleteConnection for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteConnection
+func (c *DirectConnect) DeleteConnection(input *DeleteConnectionInput) (*Connection, error) {
+	req, out := c.DeleteConnectionRequest(input)
+	return out, req.Send()
+}
+
+// DeleteConnectionWithContext is the same as DeleteConnection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteConnection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
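+//
+// A cancellation sketch (illustrative, not from the generated docs); callers
+// typically derive a cancellable context and release it when done:
+//
+//    ctx, cancel := context.WithCancel(context.Background())
+//    defer cancel()
+//    conn, err := client.DeleteConnectionWithContext(ctx, params)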
+func (c *DirectConnect) DeleteConnectionWithContext(ctx aws.Context, input *DeleteConnectionInput, opts ...request.Option) (*Connection, error) {
+	req, out := c.DeleteConnectionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteInterconnect = "DeleteInterconnect"
+
+// DeleteInterconnectRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteInterconnect operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteInterconnect for more information on using the DeleteInterconnect
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteInterconnectRequest method.
+//    req, resp := client.DeleteInterconnectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteInterconnect
+func (c *DirectConnect) DeleteInterconnectRequest(input *DeleteInterconnectInput) (req *request.Request, output *DeleteInterconnectOutput) {
+	op := &request.Operation{
+		Name:       opDeleteInterconnect,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteInterconnectInput{}
+	}
+
+	output = &DeleteInterconnectOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteInterconnect API operation for AWS Direct Connect.
+//
+// Deletes the specified interconnect.
+//
+// This is intended for use by AWS Direct Connect partners only.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DeleteInterconnect for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteInterconnect
+func (c *DirectConnect) DeleteInterconnect(input *DeleteInterconnectInput) (*DeleteInterconnectOutput, error) {
+	req, out := c.DeleteInterconnectRequest(input)
+	return out, req.Send()
+}
+
+// DeleteInterconnectWithContext is the same as DeleteInterconnect with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteInterconnect for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
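+//
+// Per-request options can also be supplied; for example (a sketch, assuming
+// the aws and request packages are imported) to log this call's HTTP body:
+//
+//    out, err := client.DeleteInterconnectWithContext(ctx, params,
+//        request.WithLogLevel(aws.LogDebugWithHTTPBody))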
+func (c *DirectConnect) DeleteInterconnectWithContext(ctx aws.Context, input *DeleteInterconnectInput, opts ...request.Option) (*DeleteInterconnectOutput, error) {
+    req, out := c.DeleteInterconnectRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDeleteLag = "DeleteLag"
+
+// DeleteLagRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLag operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLag for more information on using the DeleteLag
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteLagRequest method.
+//    req, resp := client.DeleteLagRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteLag
+func (c *DirectConnect) DeleteLagRequest(input *DeleteLagInput) (req *request.Request, output *Lag) {
+    op := &request.Operation{
+        Name:       opDeleteLag,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteLagInput{}
+    }
+
+    output = &Lag{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DeleteLag API operation for AWS Direct Connect.
+//
+// Deletes a link aggregation group (LAG). You cannot delete a LAG if it has
+// active virtual interfaces or hosted connections.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DeleteLag for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteLag
+func (c *DirectConnect) DeleteLag(input *DeleteLagInput) (*Lag, error) {
+    req, out := c.DeleteLagRequest(input)
+    return out, req.Send()
+}
+
+// DeleteLagWithContext is the same as DeleteLag with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLag for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DeleteLagWithContext(ctx aws.Context, input *DeleteLagInput, opts ...request.Option) (*Lag, error) {
+    req, out := c.DeleteLagRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDeleteVirtualInterface = "DeleteVirtualInterface"
+
+// DeleteVirtualInterfaceRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteVirtualInterface operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteVirtualInterface for more information on using the DeleteVirtualInterface
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteVirtualInterfaceRequest method.
+//    req, resp := client.DeleteVirtualInterfaceRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteVirtualInterface
+func (c *DirectConnect) DeleteVirtualInterfaceRequest(input *DeleteVirtualInterfaceInput) (req *request.Request, output *DeleteVirtualInterfaceOutput) {
+    op := &request.Operation{
+        Name:       opDeleteVirtualInterface,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DeleteVirtualInterfaceInput{}
+    }
+
+    output = &DeleteVirtualInterfaceOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DeleteVirtualInterface API operation for AWS Direct Connect.
+//
+// Deletes a virtual interface.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DeleteVirtualInterface for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteVirtualInterface
+func (c *DirectConnect) DeleteVirtualInterface(input *DeleteVirtualInterfaceInput) (*DeleteVirtualInterfaceOutput, error) {
+    req, out := c.DeleteVirtualInterfaceRequest(input)
+    return out, req.Send()
+}
+
+// DeleteVirtualInterfaceWithContext is the same as DeleteVirtualInterface with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteVirtualInterface for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DeleteVirtualInterfaceWithContext(ctx aws.Context, input *DeleteVirtualInterfaceInput, opts ...request.Option) (*DeleteVirtualInterfaceOutput, error) {
+    req, out := c.DeleteVirtualInterfaceRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeConnectionLoa = "DescribeConnectionLoa"
+
+// DescribeConnectionLoaRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConnectionLoa operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeConnectionLoa for more information on using the DescribeConnectionLoa
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeConnectionLoaRequest method.
+//    req, resp := client.DescribeConnectionLoaRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionLoa
+func (c *DirectConnect) DescribeConnectionLoaRequest(input *DescribeConnectionLoaInput) (req *request.Request, output *DescribeConnectionLoaOutput) {
+    if c.Client.Config.Logger != nil {
+        c.Client.Config.Logger.Log("This operation, DescribeConnectionLoa, has been deprecated")
+    }
+    op := &request.Operation{
+        Name:       opDescribeConnectionLoa,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeConnectionLoaInput{}
+    }
+
+    output = &DescribeConnectionLoaOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeConnectionLoa API operation for AWS Direct Connect.
+//
+// Deprecated in favor of DescribeLoa.
+//
+// Returns the LOA-CFA for a Connection.
+//
+// The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is
+// a document that your APN partner or service provider uses when establishing
+// your cross connect to AWS at the colocation facility. For more information,
+// see Requesting Cross Connects at AWS Direct Connect Locations (http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
+// in the AWS Direct Connect user guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeConnectionLoa for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionLoa
+func (c *DirectConnect) DescribeConnectionLoa(input *DescribeConnectionLoaInput) (*DescribeConnectionLoaOutput, error) {
+    req, out := c.DescribeConnectionLoaRequest(input)
+    return out, req.Send()
+}
+
+// DescribeConnectionLoaWithContext is the same as DescribeConnectionLoa with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeConnectionLoa for details on how to use this API operation.
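+//
+//    // Illustrative sketch (editor's addition, not generated SDK output):
+//    // since this operation is deprecated, new code should call DescribeLoa
+//    // instead; the connection ID is hypothetical.
+//    loa, err := svc.DescribeLoa(&directconnect.DescribeLoaInput{
+//        ConnectionOrInterconnectId: aws.String("dxcon-example"),
+//    })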
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeConnectionLoaWithContext(ctx aws.Context, input *DescribeConnectionLoaInput, opts ...request.Option) (*DescribeConnectionLoaOutput, error) {
+    req, out := c.DescribeConnectionLoaRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeConnections = "DescribeConnections"
+
+// DescribeConnectionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConnections operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeConnections for more information on using the DescribeConnections
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeConnectionsRequest method.
+//    req, resp := client.DescribeConnectionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnections
+func (c *DirectConnect) DescribeConnectionsRequest(input *DescribeConnectionsInput) (req *request.Request, output *Connections) {
+    op := &request.Operation{
+        Name:       opDescribeConnections,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeConnectionsInput{}
+    }
+
+    output = &Connections{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeConnections API operation for AWS Direct Connect.
+//
+// Displays all connections in this region.
+//
+// If a connection ID is provided, the call returns only that particular connection.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeConnections for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnections
+func (c *DirectConnect) DescribeConnections(input *DescribeConnectionsInput) (*Connections, error) {
+    req, out := c.DescribeConnectionsRequest(input)
+    return out, req.Send()
+}
+
+// DescribeConnectionsWithContext is the same as DescribeConnections with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeConnections for details on how to use this API operation.
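+//
+//    // Illustrative sketch (editor's addition, not generated SDK output):
+//    // list every connection in the region and print its ID.
+//    conns, err := svc.DescribeConnectionsWithContext(ctx, &directconnect.DescribeConnectionsInput{})
+//    if err == nil {
+//        for _, c := range conns.Connections {
+//            fmt.Println(aws.StringValue(c.ConnectionId))
+//        }
+//    }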
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeConnectionsWithContext(ctx aws.Context, input *DescribeConnectionsInput, opts ...request.Option) (*Connections, error) {
+    req, out := c.DescribeConnectionsRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeConnectionsOnInterconnect = "DescribeConnectionsOnInterconnect"
+
+// DescribeConnectionsOnInterconnectRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeConnectionsOnInterconnect operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeConnectionsOnInterconnect for more information on using the DescribeConnectionsOnInterconnect
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeConnectionsOnInterconnectRequest method.
+//    req, resp := client.DescribeConnectionsOnInterconnectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionsOnInterconnect
+func (c *DirectConnect) DescribeConnectionsOnInterconnectRequest(input *DescribeConnectionsOnInterconnectInput) (req *request.Request, output *Connections) {
+    if c.Client.Config.Logger != nil {
+        c.Client.Config.Logger.Log("This operation, DescribeConnectionsOnInterconnect, has been deprecated")
+    }
+    op := &request.Operation{
+        Name:       opDescribeConnectionsOnInterconnect,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeConnectionsOnInterconnectInput{}
+    }
+
+    output = &Connections{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeConnectionsOnInterconnect API operation for AWS Direct Connect.
+//
+// Deprecated in favor of DescribeHostedConnections.
+//
+// Returns a list of connections that have been provisioned on the given interconnect.
+//
+// This is intended for use by AWS Direct Connect partners only.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeConnectionsOnInterconnect for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionsOnInterconnect
+func (c *DirectConnect) DescribeConnectionsOnInterconnect(input *DescribeConnectionsOnInterconnectInput) (*Connections, error) {
+    req, out := c.DescribeConnectionsOnInterconnectRequest(input)
+    return out, req.Send()
+}
+
+// DescribeConnectionsOnInterconnectWithContext is the same as DescribeConnectionsOnInterconnect with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeConnectionsOnInterconnect for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeConnectionsOnInterconnectWithContext(ctx aws.Context, input *DescribeConnectionsOnInterconnectInput, opts ...request.Option) (*Connections, error) {
+    req, out := c.DescribeConnectionsOnInterconnectRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeHostedConnections = "DescribeHostedConnections"
+
+// DescribeHostedConnectionsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeHostedConnections operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeHostedConnections for more information on using the DescribeHostedConnections
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeHostedConnectionsRequest method.
+//    req, resp := client.DescribeHostedConnectionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeHostedConnections
+func (c *DirectConnect) DescribeHostedConnectionsRequest(input *DescribeHostedConnectionsInput) (req *request.Request, output *Connections) {
+    op := &request.Operation{
+        Name:       opDescribeHostedConnections,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeHostedConnectionsInput{}
+    }
+
+    output = &Connections{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeHostedConnections API operation for AWS Direct Connect.
+//
+// Returns a list of hosted connections that have been provisioned on the given
+// interconnect or link aggregation group (LAG).
+//
+// This is intended for use by AWS Direct Connect partners only.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeHostedConnections for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeHostedConnections
+func (c *DirectConnect) DescribeHostedConnections(input *DescribeHostedConnectionsInput) (*Connections, error) {
+    req, out := c.DescribeHostedConnectionsRequest(input)
+    return out, req.Send()
+}
+
+// DescribeHostedConnectionsWithContext is the same as DescribeHostedConnections with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeHostedConnections for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeHostedConnectionsWithContext(ctx aws.Context, input *DescribeHostedConnectionsInput, opts ...request.Option) (*Connections, error) {
+    req, out := c.DescribeHostedConnectionsRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeInterconnectLoa = "DescribeInterconnectLoa"
+
+// DescribeInterconnectLoaRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInterconnectLoa operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeInterconnectLoa for more information on using the DescribeInterconnectLoa
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeInterconnectLoaRequest method.
+//    req, resp := client.DescribeInterconnectLoaRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnectLoa
+func (c *DirectConnect) DescribeInterconnectLoaRequest(input *DescribeInterconnectLoaInput) (req *request.Request, output *DescribeInterconnectLoaOutput) {
+    if c.Client.Config.Logger != nil {
+        c.Client.Config.Logger.Log("This operation, DescribeInterconnectLoa, has been deprecated")
+    }
+    op := &request.Operation{
+        Name:       opDescribeInterconnectLoa,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeInterconnectLoaInput{}
+    }
+
+    output = &DescribeInterconnectLoaOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeInterconnectLoa API operation for AWS Direct Connect.
+//
+// Deprecated in favor of DescribeLoa.
+//
+// Returns the LOA-CFA for an Interconnect.
+//
+// The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is
+// a document that is used when establishing your cross connect to AWS at the
+// colocation facility. For more information, see Requesting Cross Connects
+// at AWS Direct Connect Locations (http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
+// in the AWS Direct Connect user guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeInterconnectLoa for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnectLoa
+func (c *DirectConnect) DescribeInterconnectLoa(input *DescribeInterconnectLoaInput) (*DescribeInterconnectLoaOutput, error) {
+    req, out := c.DescribeInterconnectLoaRequest(input)
+    return out, req.Send()
+}
+
+// DescribeInterconnectLoaWithContext is the same as DescribeInterconnectLoa with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeInterconnectLoa for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeInterconnectLoaWithContext(ctx aws.Context, input *DescribeInterconnectLoaInput, opts ...request.Option) (*DescribeInterconnectLoaOutput, error) {
+    req, out := c.DescribeInterconnectLoaRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeInterconnects = "DescribeInterconnects"
+
+// DescribeInterconnectsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInterconnects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeInterconnects for more information on using the DescribeInterconnects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeInterconnectsRequest method.
+//    req, resp := client.DescribeInterconnectsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnects
+func (c *DirectConnect) DescribeInterconnectsRequest(input *DescribeInterconnectsInput) (req *request.Request, output *DescribeInterconnectsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeInterconnects,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeInterconnectsInput{}
+    }
+
+    output = &DescribeInterconnectsOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeInterconnects API operation for AWS Direct Connect.
+//
+// Returns a list of interconnects owned by the AWS account.
+//
+// If an interconnect ID is provided, only that particular interconnect is returned.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeInterconnects for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnects
+func (c *DirectConnect) DescribeInterconnects(input *DescribeInterconnectsInput) (*DescribeInterconnectsOutput, error) {
+    req, out := c.DescribeInterconnectsRequest(input)
+    return out, req.Send()
+}
+
+// DescribeInterconnectsWithContext is the same as DescribeInterconnects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeInterconnects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeInterconnectsWithContext(ctx aws.Context, input *DescribeInterconnectsInput, opts ...request.Option) (*DescribeInterconnectsOutput, error) {
+    req, out := c.DescribeInterconnectsRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeLags = "DescribeLags"
+
+// DescribeLagsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLags operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeLags for more information on using the DescribeLags
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeLagsRequest method.
+//    req, resp := client.DescribeLagsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLags
+func (c *DirectConnect) DescribeLagsRequest(input *DescribeLagsInput) (req *request.Request, output *DescribeLagsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeLags,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeLagsInput{}
+    }
+
+    output = &DescribeLagsOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeLags API operation for AWS Direct Connect.
+//
+// Describes the link aggregation groups (LAGs) in your account.
+//
+// If a LAG ID is provided, only information about the specified LAG is returned.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeLags for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLags
+func (c *DirectConnect) DescribeLags(input *DescribeLagsInput) (*DescribeLagsOutput, error) {
+    req, out := c.DescribeLagsRequest(input)
+    return out, req.Send()
+}
+
+// DescribeLagsWithContext is the same as DescribeLags with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeLags for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeLagsWithContext(ctx aws.Context, input *DescribeLagsInput, opts ...request.Option) (*DescribeLagsOutput, error) {
+    req, out := c.DescribeLagsRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeLoa = "DescribeLoa"
+
+// DescribeLoaRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLoa operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeLoa for more information on using the DescribeLoa
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeLoaRequest method.
+//    req, resp := client.DescribeLoaRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLoa
+func (c *DirectConnect) DescribeLoaRequest(input *DescribeLoaInput) (req *request.Request, output *Loa) {
+    op := &request.Operation{
+        Name:       opDescribeLoa,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeLoaInput{}
+    }
+
+    output = &Loa{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeLoa API operation for AWS Direct Connect.
+//
+// Returns the LOA-CFA for a connection, interconnect, or link aggregation group
+// (LAG).
+//
+// The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is
+// a document that is used when establishing your cross connect to AWS at the
+// colocation facility. For more information, see Requesting Cross Connects
+// at AWS Direct Connect Locations (http://docs.aws.amazon.com/directconnect/latest/UserGuide/Colocation.html)
+// in the AWS Direct Connect user guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeLoa for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLoa
+func (c *DirectConnect) DescribeLoa(input *DescribeLoaInput) (*Loa, error) {
+    req, out := c.DescribeLoaRequest(input)
+    return out, req.Send()
+}
+
+// DescribeLoaWithContext is the same as DescribeLoa with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeLoa for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeLoaWithContext(ctx aws.Context, input *DescribeLoaInput, opts ...request.Option) (*Loa, error) {
+    req, out := c.DescribeLoaRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeLocations = "DescribeLocations"
+
+// DescribeLocationsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLocations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeLocations for more information on using the DescribeLocations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeLocationsRequest method.
+//    req, resp := client.DescribeLocationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLocations
+func (c *DirectConnect) DescribeLocationsRequest(input *DescribeLocationsInput) (req *request.Request, output *DescribeLocationsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeLocations,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeLocationsInput{}
+    }
+
+    output = &DescribeLocationsOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeLocations API operation for AWS Direct Connect.
+//
+// Returns the list of AWS Direct Connect locations in the current AWS region.
+// These are the locations that may be selected when calling CreateConnection
+// or CreateInterconnect.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeLocations for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLocations
+func (c *DirectConnect) DescribeLocations(input *DescribeLocationsInput) (*DescribeLocationsOutput, error) {
+    req, out := c.DescribeLocationsRequest(input)
+    return out, req.Send()
+}
+
+// DescribeLocationsWithContext is the same as DescribeLocations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeLocations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeLocationsWithContext(ctx aws.Context, input *DescribeLocationsInput, opts ...request.Option) (*DescribeLocationsOutput, error) {
+    req, out := c.DescribeLocationsRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeTags = "DescribeTags"
+
+// DescribeTagsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTags operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeTags for more information on using the DescribeTags
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeTagsRequest method.
+//    req, resp := client.DescribeTagsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeTags
+func (c *DirectConnect) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) {
+    op := &request.Operation{
+        Name:       opDescribeTags,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeTagsInput{}
+    }
+
+    output = &DescribeTagsOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeTags API operation for AWS Direct Connect.
+//
+// Describes the tags associated with the specified Direct Connect resources.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeTags for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeTags
+func (c *DirectConnect) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) {
+    req, out := c.DescribeTagsRequest(input)
+    return out, req.Send()
+}
+
+// DescribeTagsWithContext is the same as DescribeTags with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeTags for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) {
+    req, out := c.DescribeTagsRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeVirtualGateways = "DescribeVirtualGateways"
+
+// DescribeVirtualGatewaysRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVirtualGateways operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVirtualGateways for more information on using the DescribeVirtualGateways
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeVirtualGatewaysRequest method.
+//    req, resp := client.DescribeVirtualGatewaysRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeVirtualGateways
+func (c *DirectConnect) DescribeVirtualGatewaysRequest(input *DescribeVirtualGatewaysInput) (req *request.Request, output *DescribeVirtualGatewaysOutput) {
+    op := &request.Operation{
+        Name:       opDescribeVirtualGateways,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeVirtualGatewaysInput{}
+    }
+
+    output = &DescribeVirtualGatewaysOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeVirtualGateways API operation for AWS Direct Connect.
+//
+// Returns a list of virtual private gateways owned by the AWS account.
+//
+// You can create one or more AWS Direct Connect private virtual interfaces
+// linking to a virtual private gateway. A virtual private gateway can be managed
+// via the Amazon Virtual Private Cloud (VPC) console or the EC2 CreateVpnGateway
+// (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html)
+// action.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeVirtualGateways for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeVirtualGateways
+func (c *DirectConnect) DescribeVirtualGateways(input *DescribeVirtualGatewaysInput) (*DescribeVirtualGatewaysOutput, error) {
+    req, out := c.DescribeVirtualGatewaysRequest(input)
+    return out, req.Send()
+}
+
+// DescribeVirtualGatewaysWithContext is the same as DescribeVirtualGateways with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVirtualGateways for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeVirtualGatewaysWithContext(ctx aws.Context, input *DescribeVirtualGatewaysInput, opts ...request.Option) (*DescribeVirtualGatewaysOutput, error) {
+    req, out := c.DescribeVirtualGatewaysRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opDescribeVirtualInterfaces = "DescribeVirtualInterfaces"
+
+// DescribeVirtualInterfacesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeVirtualInterfaces operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeVirtualInterfaces for more information on using the DescribeVirtualInterfaces
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DescribeVirtualInterfacesRequest method.
+//    req, resp := client.DescribeVirtualInterfacesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeVirtualInterfaces
+func (c *DirectConnect) DescribeVirtualInterfacesRequest(input *DescribeVirtualInterfacesInput) (req *request.Request, output *DescribeVirtualInterfacesOutput) {
+    op := &request.Operation{
+        Name:       opDescribeVirtualInterfaces,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &DescribeVirtualInterfacesInput{}
+    }
+
+    output = &DescribeVirtualInterfacesOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// DescribeVirtualInterfaces API operation for AWS Direct Connect.
+//
+// Displays all virtual interfaces for an AWS account. Virtual interfaces deleted
+// fewer than 15 minutes before you make the request are also returned. If you
+// specify a connection ID, only the virtual interfaces associated with the
+// connection are returned. If you specify a virtual interface ID, then only
+// a single virtual interface is returned.
+//
+// A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect
+// location and the customer.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation DescribeVirtualInterfaces for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   A server-side error occurred during the API call. The error message will
+//   contain additional details about the cause.
+//
+//   * ErrCodeClientException "ClientException"
+//   The API was called with invalid parameters. The error message will contain
+//   additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeVirtualInterfaces
+func (c *DirectConnect) DescribeVirtualInterfaces(input *DescribeVirtualInterfacesInput) (*DescribeVirtualInterfacesOutput, error) {
+    req, out := c.DescribeVirtualInterfacesRequest(input)
+    return out, req.Send()
+}
+
+// DescribeVirtualInterfacesWithContext is the same as DescribeVirtualInterfaces with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeVirtualInterfaces for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) DescribeVirtualInterfacesWithContext(ctx aws.Context, input *DescribeVirtualInterfacesInput, opts ...request.Option) (*DescribeVirtualInterfacesOutput, error) { + req, out := c.DescribeVirtualInterfacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateConnectionFromLag = "DisassociateConnectionFromLag" + +// DisassociateConnectionFromLagRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateConnectionFromLag operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateConnectionFromLag for more information on using the DisassociateConnectionFromLag +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateConnectionFromLagRequest method. +// req, resp := client.DisassociateConnectionFromLagRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DisassociateConnectionFromLag +func (c *DirectConnect) DisassociateConnectionFromLagRequest(input *DisassociateConnectionFromLagInput) (req *request.Request, output *Connection) { + op := &request.Operation{ + Name: opDisassociateConnectionFromLag, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateConnectionFromLagInput{} + } + + output = &Connection{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateConnectionFromLag API operation for AWS Direct Connect. +// +// Disassociates a connection from a link aggregation group (LAG). The connection +// is interrupted and re-established as a standalone connection (the connection +// is not deleted; to delete the connection, use the DeleteConnection request). +// If the LAG has associated virtual interfaces or hosted connections, they +// remain associated with the LAG. A disassociated connection owned by an AWS +// Direct Connect partner is automatically converted to an interconnect. +// +// If disassociating the connection will cause the LAG to fall below its setting +// for minimum number of operational connections, the request fails, except +// when it's the last member of the LAG. If all connections are disassociated, +// the LAG continues to exist as an empty LAG with no physical connections. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation DisassociateConnectionFromLag for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// A server-side error occurred during the API call. The error message will +// contain additional details about the cause. +// +// * ErrCodeClientException "ClientException" +// The API was called with invalid parameters. The error message will contain +// additional details about the cause. 
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DisassociateConnectionFromLag +func (c *DirectConnect) DisassociateConnectionFromLag(input *DisassociateConnectionFromLagInput) (*Connection, error) { + req, out := c.DisassociateConnectionFromLagRequest(input) + return out, req.Send() +} + +// DisassociateConnectionFromLagWithContext is the same as DisassociateConnectionFromLag with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateConnectionFromLag for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) DisassociateConnectionFromLagWithContext(ctx aws.Context, input *DisassociateConnectionFromLagInput, opts ...request.Option) (*Connection, error) { + req, out := c.DisassociateConnectionFromLagRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/TagResource +func (c *DirectConnect) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// TagResource API operation for AWS Direct Connect. +// +// Adds the specified tags to the specified Direct Connect resource. Each Direct +// Connect resource can have a maximum of 50 tags. +// +// Each tag consists of a key and an optional value. If a tag with the same +// key is already associated with the Direct Connect resource, this action updates +// its value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeysException" +// A tag key was specified more than once. 
+//
+// * ErrCodeTooManyTagsException "TooManyTagsException"
+// You have reached the limit on the number of tags that can be assigned to
+// a Direct Connect resource.
+//
+// * ErrCodeServerException "ServerException"
+// A server-side error occurred during the API call. The error message will
+// contain additional details about the cause.
+//
+// * ErrCodeClientException "ClientException"
+// The API was called with invalid parameters. The error message will contain
+// additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/TagResource
+func (c *DirectConnect) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+	req, out := c.TagResourceRequest(input)
+	return out, req.Send()
+}
+
+// TagResourceWithContext is the same as TagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+	req, out := c.TagResourceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUntagResource = "UntagResource"
+
+// UntagResourceRequest generates a "aws/request.Request" representing the
+// client's request for the UntagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UntagResource for more information on using the UntagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UntagResourceRequest method.
+// req, resp := client.UntagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UntagResource
+func (c *DirectConnect) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
+	op := &request.Operation{
+		Name:       opUntagResource,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UntagResourceInput{}
+	}
+
+	output = &UntagResourceOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UntagResource API operation for AWS Direct Connect.
+//
+// Removes one or more tags from the specified Direct Connect resource.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Direct Connect's
+// API operation UntagResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// A server-side error occurred during the API call. The error message will
+// contain additional details about the cause.
+//
+// * ErrCodeClientException "ClientException"
+// The API was called with invalid parameters. The error message will contain
+// additional details about the cause.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UntagResource
+func (c *DirectConnect) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
+	req, out := c.UntagResourceRequest(input)
+	return out, req.Send()
+}
+
+// UntagResourceWithContext is the same as UntagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UntagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DirectConnect) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
+	req, out := c.UntagResourceRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateLag = "UpdateLag"
+
+// UpdateLagRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateLag operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateLag for more information on using the UpdateLag
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateLagRequest method.
+// req, resp := client.UpdateLagRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UpdateLag
+func (c *DirectConnect) UpdateLagRequest(input *UpdateLagInput) (req *request.Request, output *Lag) {
+	op := &request.Operation{
+		Name:       opUpdateLag,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateLagInput{}
+	}
+
+	output = &Lag{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateLag API operation for AWS Direct Connect.
+//
+// Updates the attributes of a link aggregation group (LAG).
+//
+// You can update the following attributes:
+//
+// * The name of the LAG.
+//
+// * The value for the minimum number of connections that must be operational
+// for the LAG itself to be operational.
+//
+// When you create a LAG, the default value for the minimum number of operational
+// connections is zero (0). If you update this value, and the number of operational
+// connections falls below the specified value, the LAG will automatically go
+// down to avoid overutilization of the remaining connections. Adjusting this
+// value should be done with care as it could force the LAG down if the value
+// is set higher than the current number of operational connections.
+//
+// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation UpdateLag for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// A server-side error occurred during the API call. The error message will +// contain additional details about the cause. +// +// * ErrCodeClientException "ClientException" +// The API was called with invalid parameters. The error message will contain +// additional details about the cause. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UpdateLag +func (c *DirectConnect) UpdateLag(input *UpdateLagInput) (*Lag, error) { + req, out := c.UpdateLagRequest(input) + return out, req.Send() +} + +// UpdateLagWithContext is the same as UpdateLag with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLag for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) UpdateLagWithContext(ctx aws.Context, input *UpdateLagInput, opts ...request.Option) (*Lag, error) { + req, out := c.UpdateLagRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Container for the parameters to the AllocateConnectionOnInterconnect operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocateConnectionOnInterconnectRequest +type AllocateConnectionOnInterconnectInput struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: "500Mbps" + // + // Default: None + // + // Values: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, or 500Mbps + // + // Bandwidth is a required field + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // Name of the provisioned connection. + // + // Example: "500M Connection to AWS" + // + // Default: None + // + // ConnectionName is a required field + ConnectionName *string `locationName:"connectionName" type:"string" required:"true"` + + // ID of the interconnect on which the connection will be provisioned. + // + // Example: dxcon-456abc78 + // + // Default: None + // + // InterconnectId is a required field + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` + + // Numeric account Id of the customer for whom the connection will be provisioned. + // + // Example: 123443215678 + // + // Default: None + // + // OwnerAccount is a required field + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` + + // The dedicated VLAN provisioned to the connection. + // + // Example: 101 + // + // Default: None + // + // Vlan is a required field + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllocateConnectionOnInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateConnectionOnInterconnectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
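+//
+// Illustrative sketch (not generated SDK code): the SDK runs Validate on the
+// input automatically before sending a request, but it can also be called
+// directly to surface missing required fields early:
+//
+// in := &directconnect.AllocateConnectionOnInterconnectInput{
+//     Bandwidth: aws.String("500Mbps"), // remaining required fields intentionally omitted
+// }
+// if err := in.Validate(); err != nil {
+//     fmt.Println(err) // the returned error aggregates all missing parameters
+// }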
+func (s *AllocateConnectionOnInterconnectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AllocateConnectionOnInterconnectInput"}
+	if s.Bandwidth == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bandwidth"))
+	}
+	if s.ConnectionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectionName"))
+	}
+	if s.InterconnectId == nil {
+		invalidParams.Add(request.NewErrParamRequired("InterconnectId"))
+	}
+	if s.OwnerAccount == nil {
+		invalidParams.Add(request.NewErrParamRequired("OwnerAccount"))
+	}
+	if s.Vlan == nil {
+		invalidParams.Add(request.NewErrParamRequired("Vlan"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBandwidth sets the Bandwidth field's value.
+func (s *AllocateConnectionOnInterconnectInput) SetBandwidth(v string) *AllocateConnectionOnInterconnectInput {
+	s.Bandwidth = &v
+	return s
+}
+
+// SetConnectionName sets the ConnectionName field's value.
+func (s *AllocateConnectionOnInterconnectInput) SetConnectionName(v string) *AllocateConnectionOnInterconnectInput {
+	s.ConnectionName = &v
+	return s
+}
+
+// SetInterconnectId sets the InterconnectId field's value.
+func (s *AllocateConnectionOnInterconnectInput) SetInterconnectId(v string) *AllocateConnectionOnInterconnectInput {
+	s.InterconnectId = &v
+	return s
+}
+
+// SetOwnerAccount sets the OwnerAccount field's value.
+func (s *AllocateConnectionOnInterconnectInput) SetOwnerAccount(v string) *AllocateConnectionOnInterconnectInput {
+	s.OwnerAccount = &v
+	return s
+}
+
+// SetVlan sets the Vlan field's value.
+func (s *AllocateConnectionOnInterconnectInput) SetVlan(v int64) *AllocateConnectionOnInterconnectInput {
+	s.Vlan = &v
+	return s
+}
+
+// Container for the parameters to the AllocateHostedConnection operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocateHostedConnectionRequest
+type AllocateHostedConnectionInput struct {
+	_ struct{} `type:"structure"`
+
+	// The bandwidth of the connection.
+	//
+	// Example: 500Mbps
+	//
+	// Default: None
+	//
+	// Values: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, or 500Mbps
+	//
+	// Bandwidth is a required field
+	Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"`
+
+	// The ID of the interconnect or LAG on which the connection will be provisioned.
+	//
+	// Example: dxcon-456abc78 or dxlag-abc123
+	//
+	// Default: None
+	//
+	// ConnectionId is a required field
+	ConnectionId *string `locationName:"connectionId" type:"string" required:"true"`
+
+	// The name of the provisioned connection.
+	//
+	// Example: "500M Connection to AWS"
+	//
+	// Default: None
+	//
+	// ConnectionName is a required field
+	ConnectionName *string `locationName:"connectionName" type:"string" required:"true"`
+
+	// The numeric account ID of the customer for whom the connection will be provisioned.
+	//
+	// Example: 123443215678
+	//
+	// Default: None
+	//
+	// OwnerAccount is a required field
+	OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"`
+
+	// The dedicated VLAN provisioned to the hosted connection.
+ // + // Example: 101 + // + // Default: None + // + // Vlan is a required field + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllocateHostedConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateHostedConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocateHostedConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocateHostedConnectionInput"} + if s.Bandwidth == nil { + invalidParams.Add(request.NewErrParamRequired("Bandwidth")) + } + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.ConnectionName == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionName")) + } + if s.OwnerAccount == nil { + invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBandwidth sets the Bandwidth field's value. +func (s *AllocateHostedConnectionInput) SetBandwidth(v string) *AllocateHostedConnectionInput { + s.Bandwidth = &v + return s +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *AllocateHostedConnectionInput) SetConnectionId(v string) *AllocateHostedConnectionInput { + s.ConnectionId = &v + return s +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *AllocateHostedConnectionInput) SetConnectionName(v string) *AllocateHostedConnectionInput { + s.ConnectionName = &v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *AllocateHostedConnectionInput) SetOwnerAccount(v string) *AllocateHostedConnectionInput { + s.OwnerAccount = &v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *AllocateHostedConnectionInput) SetVlan(v int64) *AllocateHostedConnectionInput { + s.Vlan = &v + return s +} + +// Container for the parameters to the AllocatePrivateVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocatePrivateVirtualInterfaceRequest +type AllocatePrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The connection ID on which the private virtual interface is provisioned. + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the private virtual interface to be provisioned. + // + // Default: None + // + // NewPrivateVirtualInterfaceAllocation is a required field + NewPrivateVirtualInterfaceAllocation *NewPrivateVirtualInterfaceAllocation `locationName:"newPrivateVirtualInterfaceAllocation" type:"structure" required:"true"` + + // The AWS account that will own the new private virtual interface. 
+ // + // Default: None + // + // OwnerAccount is a required field + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocatePrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocatePrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocatePrivateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocatePrivateVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.NewPrivateVirtualInterfaceAllocation == nil { + invalidParams.Add(request.NewErrParamRequired("NewPrivateVirtualInterfaceAllocation")) + } + if s.OwnerAccount == nil { + invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) + } + if s.NewPrivateVirtualInterfaceAllocation != nil { + if err := s.NewPrivateVirtualInterfaceAllocation.Validate(); err != nil { + invalidParams.AddNested("NewPrivateVirtualInterfaceAllocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *AllocatePrivateVirtualInterfaceInput) SetConnectionId(v string) *AllocatePrivateVirtualInterfaceInput { + s.ConnectionId = &v + return s +} + +// SetNewPrivateVirtualInterfaceAllocation sets the NewPrivateVirtualInterfaceAllocation field's value. +func (s *AllocatePrivateVirtualInterfaceInput) SetNewPrivateVirtualInterfaceAllocation(v *NewPrivateVirtualInterfaceAllocation) *AllocatePrivateVirtualInterfaceInput { + s.NewPrivateVirtualInterfaceAllocation = v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *AllocatePrivateVirtualInterfaceInput) SetOwnerAccount(v string) *AllocatePrivateVirtualInterfaceInput { + s.OwnerAccount = &v + return s +} + +// Container for the parameters to the AllocatePublicVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AllocatePublicVirtualInterfaceRequest +type AllocatePublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The connection ID on which the public virtual interface is provisioned. + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the public virtual interface to be provisioned. + // + // Default: None + // + // NewPublicVirtualInterfaceAllocation is a required field + NewPublicVirtualInterfaceAllocation *NewPublicVirtualInterfaceAllocation `locationName:"newPublicVirtualInterfaceAllocation" type:"structure" required:"true"` + + // The AWS account that will own the new public virtual interface. + // + // Default: None + // + // OwnerAccount is a required field + OwnerAccount *string `locationName:"ownerAccount" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocatePublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocatePublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AllocatePublicVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocatePublicVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.NewPublicVirtualInterfaceAllocation == nil { + invalidParams.Add(request.NewErrParamRequired("NewPublicVirtualInterfaceAllocation")) + } + if s.OwnerAccount == nil { + invalidParams.Add(request.NewErrParamRequired("OwnerAccount")) + } + if s.NewPublicVirtualInterfaceAllocation != nil { + if err := s.NewPublicVirtualInterfaceAllocation.Validate(); err != nil { + invalidParams.AddNested("NewPublicVirtualInterfaceAllocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *AllocatePublicVirtualInterfaceInput) SetConnectionId(v string) *AllocatePublicVirtualInterfaceInput { + s.ConnectionId = &v + return s +} + +// SetNewPublicVirtualInterfaceAllocation sets the NewPublicVirtualInterfaceAllocation field's value. +func (s *AllocatePublicVirtualInterfaceInput) SetNewPublicVirtualInterfaceAllocation(v *NewPublicVirtualInterfaceAllocation) *AllocatePublicVirtualInterfaceInput { + s.NewPublicVirtualInterfaceAllocation = v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *AllocatePublicVirtualInterfaceInput) SetOwnerAccount(v string) *AllocatePublicVirtualInterfaceInput { + s.OwnerAccount = &v + return s +} + +// Container for the parameters to the AssociateConnectionWithLag operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateConnectionWithLagRequest +type AssociateConnectionWithLagInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. + // + // Example: dxcon-abc123 + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // The ID of the LAG with which to associate the connection. + // + // Example: dxlag-abc123 + // + // Default: None + // + // LagId is a required field + LagId *string `locationName:"lagId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateConnectionWithLagInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateConnectionWithLagInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateConnectionWithLagInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateConnectionWithLagInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.LagId == nil { + invalidParams.Add(request.NewErrParamRequired("LagId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *AssociateConnectionWithLagInput) SetConnectionId(v string) *AssociateConnectionWithLagInput { + s.ConnectionId = &v + return s +} + +// SetLagId sets the LagId field's value. +func (s *AssociateConnectionWithLagInput) SetLagId(v string) *AssociateConnectionWithLagInput { + s.LagId = &v + return s +} + +// Container for the parameters to the AssociateHostedConnection operation. 
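+//
+// A short, hedged usage sketch (not generated code), assuming svc is a
+// *directconnect.DirectConnect client; both IDs are placeholders:
+//
+// conn, err := svc.AssociateHostedConnection(&directconnect.AssociateHostedConnectionInput{
+//     ConnectionId:       aws.String("dxcon-abc123"),
+//     ParentConnectionId: aws.String("dxlag-abc123"),
+// })
+// // on success, conn describes the hosted connection on its new parent
+//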
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateHostedConnectionRequest +type AssociateHostedConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the hosted connection. + // + // Example: dxcon-abc123 + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // The ID of the interconnect or the LAG. + // + // Example: dxcon-abc123 or dxlag-abc123 + // + // Default: None + // + // ParentConnectionId is a required field + ParentConnectionId *string `locationName:"parentConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateHostedConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateHostedConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateHostedConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateHostedConnectionInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.ParentConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ParentConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *AssociateHostedConnectionInput) SetConnectionId(v string) *AssociateHostedConnectionInput { + s.ConnectionId = &v + return s +} + +// SetParentConnectionId sets the ParentConnectionId field's value. +func (s *AssociateHostedConnectionInput) SetParentConnectionId(v string) *AssociateHostedConnectionInput { + s.ParentConnectionId = &v + return s +} + +// Container for the parameters to the AssociateVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/AssociateVirtualInterfaceRequest +type AssociateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the LAG or connection with which to associate the virtual interface. + // + // Example: dxlag-abc123 or dxcon-abc123 + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // The ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + // + // VirtualInterfaceId is a required field + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. 
+func (s *AssociateVirtualInterfaceInput) SetConnectionId(v string) *AssociateVirtualInterfaceInput { + s.ConnectionId = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *AssociateVirtualInterfaceInput) SetVirtualInterfaceId(v string) *AssociateVirtualInterfaceInput { + s.VirtualInterfaceId = &v + return s +} + +// A structure containing information about a BGP peer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/BGPPeer +type BGPPeer struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // The state of the BGP peer. + // + // * Verifying: The BGP peering addresses or ASN require validation before + // the BGP peer can be created. This state only applies to BGP peers on a + // public virtual interface. + // + // * Pending: The BGP peer has been created, and is in this state until it + // is ready to be established. + // + // * Available: The BGP peer can be established. + // + // * Deleting: The BGP peer is in the process of being deleted. + // + // * Deleted: The BGP peer has been deleted and cannot be established. + BgpPeerState *string `locationName:"bgpPeerState" type:"string" enum:"BGPPeerState"` + + // The Up/Down state of the BGP peer. + // + // * Up: The BGP peer is established. + // + // * Down: The BGP peer is down. + BgpStatus *string `locationName:"bgpStatus" type:"string" enum:"BGPStatus"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` +} + +// String returns the string representation +func (s BGPPeer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BGPPeer) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *BGPPeer) SetAddressFamily(v string) *BGPPeer { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. +func (s *BGPPeer) SetAmazonAddress(v string) *BGPPeer { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *BGPPeer) SetAsn(v int64) *BGPPeer { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *BGPPeer) SetAuthKey(v string) *BGPPeer { + s.AuthKey = &v + return s +} + +// SetBgpPeerState sets the BgpPeerState field's value. +func (s *BGPPeer) SetBgpPeerState(v string) *BGPPeer { + s.BgpPeerState = &v + return s +} + +// SetBgpStatus sets the BgpStatus field's value. +func (s *BGPPeer) SetBgpStatus(v string) *BGPPeer { + s.BgpStatus = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. 
+func (s *BGPPeer) SetCustomerAddress(v string) *BGPPeer { + s.CustomerAddress = &v + return s +} + +// Container for the parameters to the ConfirmConnection operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmConnectionRequest +type ConfirmConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. This field is also used as the ID type for operations + // that use multiple connection types (LAG, interconnect, and/or connection). + // + // Example: dxcon-fg5678gh + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmConnectionInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *ConfirmConnectionInput) SetConnectionId(v string) *ConfirmConnectionInput { + s.ConnectionId = &v + return s +} + +// The response received when ConfirmConnection is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmConnectionResponse +type ConfirmConnectionOutput struct { + _ struct{} `type:"structure"` + + // State of the connection. + // + // * Ordering: The initial state of a hosted connection provisioned on an + // interconnect. The connection stays in the ordering state until the owner + // of the hosted connection confirms or declines the connection order. + // + // * Requested: The initial state of a standard connection. The connection + // stays in the requested state until the Letter of Authorization (LOA) is + // sent to the customer. + // + // * Pending: The connection has been approved, and is being initialized. + // + // * Available: The network link is up, and the connection is ready for use. + // + // * Down: The network link is down. + // + // * Deleting: The connection is in the process of being deleted. + // + // * Deleted: The connection has been deleted. + // + // * Rejected: A hosted connection in the 'Ordering' state will enter the + // 'Rejected' state if it is deleted by the end customer. + ConnectionState *string `locationName:"connectionState" type:"string" enum:"ConnectionState"` +} + +// String returns the string representation +func (s ConfirmConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmConnectionOutput) GoString() string { + return s.String() +} + +// SetConnectionState sets the ConnectionState field's value. +func (s *ConfirmConnectionOutput) SetConnectionState(v string) *ConfirmConnectionOutput { + s.ConnectionState = &v + return s +} + +// Container for the parameters to the ConfirmPrivateVirtualInterface operation. 
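+//
+// Hedged sketch (not generated code), assuming svc is a *directconnect.DirectConnect
+// client; both IDs are placeholders:
+//
+// resp, err := svc.ConfirmPrivateVirtualInterface(&directconnect.ConfirmPrivateVirtualInterfaceInput{
+//     VirtualGatewayId:   aws.String("vgw-123abc45"),
+//     VirtualInterfaceId: aws.String("dxvif-123dfg56"),
+// })
+// // resp.VirtualInterfaceState then reflects the confirmed interface's state
+//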
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPrivateVirtualInterfaceRequest +type ConfirmPrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // ID of the virtual private gateway that will be attached to the virtual interface. + // + // A virtual private gateway can be managed via the Amazon Virtual Private Cloud + // (VPC) console or the EC2 CreateVpnGateway (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html) + // action. + // + // Default: None + // + // VirtualGatewayId is a required field + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string" required:"true"` + + // The ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + // + // VirtualInterfaceId is a required field + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmPrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmPrivateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmPrivateVirtualInterfaceInput"} + if s.VirtualGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayId")) + } + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetVirtualGatewayId sets the VirtualGatewayId field's value. +func (s *ConfirmPrivateVirtualInterfaceInput) SetVirtualGatewayId(v string) *ConfirmPrivateVirtualInterfaceInput { + s.VirtualGatewayId = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *ConfirmPrivateVirtualInterfaceInput) SetVirtualInterfaceId(v string) *ConfirmPrivateVirtualInterfaceInput { + s.VirtualInterfaceId = &v + return s +} + +// The response received when ConfirmPrivateVirtualInterface is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPrivateVirtualInterfaceResponse +type ConfirmPrivateVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. + // + // * Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface + // is different from the owner of the connection on which it is provisioned, + // then the virtual interface will remain in this state until it is confirmed + // by the virtual interface owner. + // + // * Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface + // can be created. + // + // * Pending: A virtual interface is in this state from the time that it + // is created until the virtual interface is ready to forward traffic. + // + // * Available: A virtual interface that is able to forward traffic. + // + // * Down: A virtual interface that is BGP down. + // + // * Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. 
+ // + // * Deleted: A virtual interface that cannot forward traffic. + // + // * Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted + // by the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s ConfirmPrivateVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPrivateVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// SetVirtualInterfaceState sets the VirtualInterfaceState field's value. +func (s *ConfirmPrivateVirtualInterfaceOutput) SetVirtualInterfaceState(v string) *ConfirmPrivateVirtualInterfaceOutput { + s.VirtualInterfaceState = &v + return s +} + +// Container for the parameters to the ConfirmPublicVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPublicVirtualInterfaceRequest +type ConfirmPublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + // + // VirtualInterfaceId is a required field + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmPublicVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfirmPublicVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmPublicVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfirmPublicVirtualInterfaceInput"} + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *ConfirmPublicVirtualInterfaceInput) SetVirtualInterfaceId(v string) *ConfirmPublicVirtualInterfaceInput { + s.VirtualInterfaceId = &v + return s +} + +// The response received when ConfirmPublicVirtualInterface is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ConfirmPublicVirtualInterfaceResponse +type ConfirmPublicVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. + // + // * Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface + // is different from the owner of the connection on which it is provisioned, + // then the virtual interface will remain in this state until it is confirmed + // by the virtual interface owner. + // + // * Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface + // can be created. + // + // * Pending: A virtual interface is in this state from the time that it + // is created until the virtual interface is ready to forward traffic. + // + // * Available: A virtual interface that is able to forward traffic. 
+	//
+	// * Down: A virtual interface that is BGP down.
+	//
+	// * Deleting: A virtual interface is in this state immediately after calling
+	// DeleteVirtualInterface until it can no longer forward traffic.
+	//
+	// * Deleted: A virtual interface that cannot forward traffic.
+	//
+	// * Rejected: The virtual interface owner has declined creation of the virtual
+	// interface. If a virtual interface in the 'Confirming' state is deleted
+	// by the virtual interface owner, the virtual interface will enter the 'Rejected'
+	// state.
+	VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"`
+}
+
+// String returns the string representation
+func (s ConfirmPublicVirtualInterfaceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfirmPublicVirtualInterfaceOutput) GoString() string {
+	return s.String()
+}
+
+// SetVirtualInterfaceState sets the VirtualInterfaceState field's value.
+func (s *ConfirmPublicVirtualInterfaceOutput) SetVirtualInterfaceState(v string) *ConfirmPublicVirtualInterfaceOutput {
+	s.VirtualInterfaceState = &v
+	return s
+}
+
+// A connection represents the physical network connection between the AWS Direct
+// Connect location and the customer.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Connection
+type Connection struct {
+	_ struct{} `type:"structure"`
+
+	// The Direct Connect endpoint on which the physical connection terminates.
+	AwsDevice *string `locationName:"awsDevice" type:"string"`
+
+	// Bandwidth of the connection.
+	//
+	// Example: 1Gbps (for regular connections), or 500Mbps (for hosted connections)
+	//
+	// Default: None
+	Bandwidth *string `locationName:"bandwidth" type:"string"`
+
+	// The ID of the connection. This field is also used as the ID type for operations
+	// that use multiple connection types (LAG, interconnect, and/or connection).
+	//
+	// Example: dxcon-fg5678gh
+	//
+	// Default: None
+	ConnectionId *string `locationName:"connectionId" type:"string"`
+
+	// The name of the connection.
+	//
+	// Example: "My Connection to AWS"
+	//
+	// Default: None
+	ConnectionName *string `locationName:"connectionName" type:"string"`
+
+	// State of the connection.
+	//
+	// * Ordering: The initial state of a hosted connection provisioned on an
+	// interconnect. The connection stays in the ordering state until the owner
+	// of the hosted connection confirms or declines the connection order.
+	//
+	// * Requested: The initial state of a standard connection. The connection
+	// stays in the requested state until the Letter of Authorization (LOA) is
+	// sent to the customer.
+	//
+	// * Pending: The connection has been approved, and is being initialized.
+	//
+	// * Available: The network link is up, and the connection is ready for use.
+	//
+	// * Down: The network link is down.
+	//
+	// * Deleting: The connection is in the process of being deleted.
+	//
+	// * Deleted: The connection has been deleted.
+	//
+	// * Rejected: A hosted connection in the 'Ordering' state will enter the
+	// 'Rejected' state if it is deleted by the end customer.
+	ConnectionState *string `locationName:"connectionState" type:"string" enum:"ConnectionState"`
+
+	// The ID of the LAG.
+	//
+	// Example: dxlag-fg5678gh
+	LagId *string `locationName:"lagId" type:"string"`
+
+	// The time of the most recent call to DescribeLoa for this connection.
+ LoaIssueTime *time.Time `locationName:"loaIssueTime" type:"timestamp" timestampFormat:"unix"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + // The AWS account that will own the new connection. + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // The name of the AWS Direct Connect service provider associated with the connection. + PartnerName *string `locationName:"partnerName" type:"string"` + + // The AWS region where the connection is located. + // + // Example: us-east-1 + // + // Default: None + Region *string `locationName:"region" type:"string"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer"` +} + +// String returns the string representation +func (s Connection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connection) GoString() string { + return s.String() +} + +// SetAwsDevice sets the AwsDevice field's value. +func (s *Connection) SetAwsDevice(v string) *Connection { + s.AwsDevice = &v + return s +} + +// SetBandwidth sets the Bandwidth field's value. +func (s *Connection) SetBandwidth(v string) *Connection { + s.Bandwidth = &v + return s +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *Connection) SetConnectionId(v string) *Connection { + s.ConnectionId = &v + return s +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *Connection) SetConnectionName(v string) *Connection { + s.ConnectionName = &v + return s +} + +// SetConnectionState sets the ConnectionState field's value. +func (s *Connection) SetConnectionState(v string) *Connection { + s.ConnectionState = &v + return s +} + +// SetLagId sets the LagId field's value. +func (s *Connection) SetLagId(v string) *Connection { + s.LagId = &v + return s +} + +// SetLoaIssueTime sets the LoaIssueTime field's value. +func (s *Connection) SetLoaIssueTime(v time.Time) *Connection { + s.LoaIssueTime = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *Connection) SetLocation(v string) *Connection { + s.Location = &v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *Connection) SetOwnerAccount(v string) *Connection { + s.OwnerAccount = &v + return s +} + +// SetPartnerName sets the PartnerName field's value. +func (s *Connection) SetPartnerName(v string) *Connection { + s.PartnerName = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *Connection) SetRegion(v string) *Connection { + s.Region = &v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *Connection) SetVlan(v int64) *Connection { + s.Vlan = &v + return s +} + +// A structure containing a list of connections. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Connections +type Connections struct { + _ struct{} `type:"structure"` + + // A list of connections. + Connections []*Connection `locationName:"connections" type:"list"` +} + +// String returns the string representation +func (s Connections) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Connections) GoString() string { + return s.String() +} + +// SetConnections sets the Connections field's value. 
+func (s *Connections) SetConnections(v []*Connection) *Connections { + s.Connections = v + return s +} + +// Container for the parameters to the CreateBGPPeer operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateBGPPeerRequest +type CreateBGPPeerInput struct { + _ struct{} `type:"structure"` + + // Detailed information for the BGP peer to be created. + // + // Default: None + NewBGPPeer *NewBGPPeer `locationName:"newBGPPeer" type:"structure"` + + // The ID of the virtual interface on which the BGP peer will be provisioned. + // + // Example: dxvif-456abc78 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` +} + +// String returns the string representation +func (s CreateBGPPeerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBGPPeerInput) GoString() string { + return s.String() +} + +// SetNewBGPPeer sets the NewBGPPeer field's value. +func (s *CreateBGPPeerInput) SetNewBGPPeer(v *NewBGPPeer) *CreateBGPPeerInput { + s.NewBGPPeer = v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *CreateBGPPeerInput) SetVirtualInterfaceId(v string) *CreateBGPPeerInput { + s.VirtualInterfaceId = &v + return s +} + +// The response received when CreateBGPPeer is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateBGPPeerResponse +type CreateBGPPeerOutput struct { + _ struct{} `type:"structure"` + + // A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect + // location and the customer. + VirtualInterface *VirtualInterface `locationName:"virtualInterface" type:"structure"` +} + +// String returns the string representation +func (s CreateBGPPeerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBGPPeerOutput) GoString() string { + return s.String() +} + +// SetVirtualInterface sets the VirtualInterface field's value. +func (s *CreateBGPPeerOutput) SetVirtualInterface(v *VirtualInterface) *CreateBGPPeerOutput { + s.VirtualInterface = v + return s +} + +// Container for the parameters to the CreateConnection operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateConnectionRequest +type CreateConnectionInput struct { + _ struct{} `type:"structure"` + + // Bandwidth of the connection. + // + // Example: 1Gbps + // + // Default: None + // + // Bandwidth is a required field + Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"` + + // The name of the connection. + // + // Example: "My Connection to AWS" + // + // Default: None + // + // ConnectionName is a required field + ConnectionName *string `locationName:"connectionName" type:"string" required:"true"` + + // The ID of the LAG. + // + // Example: dxlag-fg5678gh + LagId *string `locationName:"lagId" type:"string"` + + // Where the connection is located. 
+	//
+	// Example: EqSV5
+	//
+	// Default: None
+	//
+	// Location is a required field
+	Location *string `locationName:"location" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateConnectionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateConnectionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateConnectionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CreateConnectionInput"}
+	if s.Bandwidth == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bandwidth"))
+	}
+	if s.ConnectionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("ConnectionName"))
+	}
+	if s.Location == nil {
+		invalidParams.Add(request.NewErrParamRequired("Location"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBandwidth sets the Bandwidth field's value.
+func (s *CreateConnectionInput) SetBandwidth(v string) *CreateConnectionInput {
+	s.Bandwidth = &v
+	return s
+}
+
+// SetConnectionName sets the ConnectionName field's value.
+func (s *CreateConnectionInput) SetConnectionName(v string) *CreateConnectionInput {
+	s.ConnectionName = &v
+	return s
+}
+
+// SetLagId sets the LagId field's value.
+func (s *CreateConnectionInput) SetLagId(v string) *CreateConnectionInput {
+	s.LagId = &v
+	return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateConnectionInput) SetLocation(v string) *CreateConnectionInput {
+	s.Location = &v
+	return s
+}
+
+// Container for the parameters to the CreateInterconnect operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateInterconnectRequest
+type CreateInterconnectInput struct {
+	_ struct{} `type:"structure"`
+
+	// The port bandwidth.
+	//
+	// Example: 1Gbps
+	//
+	// Default: None
+	//
+	// Available values: 1Gbps, 10Gbps
+	//
+	// Bandwidth is a required field
+	Bandwidth *string `locationName:"bandwidth" type:"string" required:"true"`
+
+	// The name of the interconnect.
+	//
+	// Example: "1G Interconnect to AWS"
+	//
+	// Default: None
+	//
+	// InterconnectName is a required field
+	InterconnectName *string `locationName:"interconnectName" type:"string" required:"true"`
+
+	// The ID of the LAG.
+	//
+	// Example: dxlag-fg5678gh
+	LagId *string `locationName:"lagId" type:"string"`
+
+	// Where the interconnect is located.
+	//
+	// Example: EqSV5
+	//
+	// Default: None
+	//
+	// Location is a required field
+	Location *string `locationName:"location" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateInterconnectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateInterconnectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateInterconnectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInterconnectInput"} + if s.Bandwidth == nil { + invalidParams.Add(request.NewErrParamRequired("Bandwidth")) + } + if s.InterconnectName == nil { + invalidParams.Add(request.NewErrParamRequired("InterconnectName")) + } + if s.Location == nil { + invalidParams.Add(request.NewErrParamRequired("Location")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBandwidth sets the Bandwidth field's value. +func (s *CreateInterconnectInput) SetBandwidth(v string) *CreateInterconnectInput { + s.Bandwidth = &v + return s +} + +// SetInterconnectName sets the InterconnectName field's value. +func (s *CreateInterconnectInput) SetInterconnectName(v string) *CreateInterconnectInput { + s.InterconnectName = &v + return s +} + +// SetLagId sets the LagId field's value. +func (s *CreateInterconnectInput) SetLagId(v string) *CreateInterconnectInput { + s.LagId = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateInterconnectInput) SetLocation(v string) *CreateInterconnectInput { + s.Location = &v + return s +} + +// Container for the parameters to the CreateLag operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreateLagRequest +type CreateLagInput struct { + _ struct{} `type:"structure"` + + // The ID of an existing connection to migrate to the LAG. + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // The bandwidth of the individual physical connections bundled by the LAG. + // + // Default: None + // + // Available values: 1Gbps, 10Gbps + // + // ConnectionsBandwidth is a required field + ConnectionsBandwidth *string `locationName:"connectionsBandwidth" type:"string" required:"true"` + + // The name of the LAG. + // + // Example: "3x10G LAG to AWS" + // + // Default: None + // + // LagName is a required field + LagName *string `locationName:"lagName" type:"string" required:"true"` + + // The AWS Direct Connect location in which the LAG should be allocated. + // + // Example: EqSV5 + // + // Default: None + // + // Location is a required field + Location *string `locationName:"location" type:"string" required:"true"` + + // The number of physical connections initially provisioned and bundled by the + // LAG. + // + // Default: None + // + // NumberOfConnections is a required field + NumberOfConnections *int64 `locationName:"numberOfConnections" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateLagInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLagInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLagInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLagInput"} + if s.ConnectionsBandwidth == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionsBandwidth")) + } + if s.LagName == nil { + invalidParams.Add(request.NewErrParamRequired("LagName")) + } + if s.Location == nil { + invalidParams.Add(request.NewErrParamRequired("Location")) + } + if s.NumberOfConnections == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfConnections")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. 
+func (s *CreateLagInput) SetConnectionId(v string) *CreateLagInput { + s.ConnectionId = &v + return s +} + +// SetConnectionsBandwidth sets the ConnectionsBandwidth field's value. +func (s *CreateLagInput) SetConnectionsBandwidth(v string) *CreateLagInput { + s.ConnectionsBandwidth = &v + return s +} + +// SetLagName sets the LagName field's value. +func (s *CreateLagInput) SetLagName(v string) *CreateLagInput { + s.LagName = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateLagInput) SetLocation(v string) *CreateLagInput { + s.Location = &v + return s +} + +// SetNumberOfConnections sets the NumberOfConnections field's value. +func (s *CreateLagInput) SetNumberOfConnections(v int64) *CreateLagInput { + s.NumberOfConnections = &v + return s +} + +// Container for the parameters to the CreatePrivateVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreatePrivateVirtualInterfaceRequest +type CreatePrivateVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. This field is also used as the ID type for operations + // that use multiple connection types (LAG, interconnect, and/or connection). + // + // Example: dxcon-fg5678gh + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // Detailed information for the private virtual interface to be created. + // + // Default: None + // + // NewPrivateVirtualInterface is a required field + NewPrivateVirtualInterface *NewPrivateVirtualInterface `locationName:"newPrivateVirtualInterface" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePrivateVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePrivateVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePrivateVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePrivateVirtualInterfaceInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + if s.NewPrivateVirtualInterface == nil { + invalidParams.Add(request.NewErrParamRequired("NewPrivateVirtualInterface")) + } + if s.NewPrivateVirtualInterface != nil { + if err := s.NewPrivateVirtualInterface.Validate(); err != nil { + invalidParams.AddNested("NewPrivateVirtualInterface", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *CreatePrivateVirtualInterfaceInput) SetConnectionId(v string) *CreatePrivateVirtualInterfaceInput { + s.ConnectionId = &v + return s +} + +// SetNewPrivateVirtualInterface sets the NewPrivateVirtualInterface field's value. +func (s *CreatePrivateVirtualInterfaceInput) SetNewPrivateVirtualInterface(v *NewPrivateVirtualInterface) *CreatePrivateVirtualInterfaceInput { + s.NewPrivateVirtualInterface = v + return s +} + +// Container for the parameters to the CreatePublicVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/CreatePublicVirtualInterfaceRequest +type CreatePublicVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. 
+ // This field is also used as the ID type for operations
+ // that use multiple connection types (LAG, interconnect, and/or connection).
+ //
+ // Example: dxcon-fg5678gh
+ //
+ // Default: None
+ //
+ // ConnectionId is a required field
+ ConnectionId *string `locationName:"connectionId" type:"string" required:"true"`
+
+ // Detailed information for the public virtual interface to be created.
+ //
+ // Default: None
+ //
+ // NewPublicVirtualInterface is a required field
+ NewPublicVirtualInterface *NewPublicVirtualInterface `locationName:"newPublicVirtualInterface" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s CreatePublicVirtualInterfaceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreatePublicVirtualInterfaceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreatePublicVirtualInterfaceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreatePublicVirtualInterfaceInput"}
+ if s.ConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConnectionId"))
+ }
+ if s.NewPublicVirtualInterface == nil {
+ invalidParams.Add(request.NewErrParamRequired("NewPublicVirtualInterface"))
+ }
+ if s.NewPublicVirtualInterface != nil {
+ if err := s.NewPublicVirtualInterface.Validate(); err != nil {
+ invalidParams.AddNested("NewPublicVirtualInterface", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConnectionId sets the ConnectionId field's value.
+func (s *CreatePublicVirtualInterfaceInput) SetConnectionId(v string) *CreatePublicVirtualInterfaceInput {
+ s.ConnectionId = &v
+ return s
+}
+
+// SetNewPublicVirtualInterface sets the NewPublicVirtualInterface field's value.
+func (s *CreatePublicVirtualInterfaceInput) SetNewPublicVirtualInterface(v *NewPublicVirtualInterface) *CreatePublicVirtualInterfaceInput {
+ s.NewPublicVirtualInterface = v
+ return s
+}
+
+// Container for the parameters to the DeleteBGPPeer operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteBGPPeerRequest
+type DeleteBGPPeerInput struct {
+ _ struct{} `type:"structure"`
+
+ // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
+ //
+ // Example: 65000
+ Asn *int64 `locationName:"asn" type:"integer"`
+
+ // IP address assigned to the customer interface.
+ //
+ // Example: 192.168.1.2/30 or 2001:db8::2/125
+ CustomerAddress *string `locationName:"customerAddress" type:"string"`
+
+ // The ID of the virtual interface from which the BGP peer will be deleted.
+ //
+ // Example: dxvif-456abc78
+ //
+ // Default: None
+ VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteBGPPeerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBGPPeerInput) GoString() string {
+ return s.String()
+}
+
+// SetAsn sets the Asn field's value.
+func (s *DeleteBGPPeerInput) SetAsn(v int64) *DeleteBGPPeerInput {
+ s.Asn = &v
+ return s
+}
+
+// SetCustomerAddress sets the CustomerAddress field's value.
+func (s *DeleteBGPPeerInput) SetCustomerAddress(v string) *DeleteBGPPeerInput {
+ s.CustomerAddress = &v
+ return s
+}
+
+// SetVirtualInterfaceId sets the VirtualInterfaceId field's value.
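+//
+// Illustrative sketch (not part of the generated code) of composing the
+// input with the setters:
+//
+//	input := (&DeleteBGPPeerInput{}).
+//		SetAsn(65000).
+//		SetCustomerAddress("192.168.1.2/30").
+//		SetVirtualInterfaceId("dxvif-456abc78")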
+func (s *DeleteBGPPeerInput) SetVirtualInterfaceId(v string) *DeleteBGPPeerInput { + s.VirtualInterfaceId = &v + return s +} + +// The response received when DeleteBGPPeer is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteBGPPeerResponse +type DeleteBGPPeerOutput struct { + _ struct{} `type:"structure"` + + // A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect + // location and the customer. + VirtualInterface *VirtualInterface `locationName:"virtualInterface" type:"structure"` +} + +// String returns the string representation +func (s DeleteBGPPeerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBGPPeerOutput) GoString() string { + return s.String() +} + +// SetVirtualInterface sets the VirtualInterface field's value. +func (s *DeleteBGPPeerOutput) SetVirtualInterface(v *VirtualInterface) *DeleteBGPPeerOutput { + s.VirtualInterface = v + return s +} + +// Container for the parameters to the DeleteConnection operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteConnectionRequest +type DeleteConnectionInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. This field is also used as the ID type for operations + // that use multiple connection types (LAG, interconnect, and/or connection). + // + // Example: dxcon-fg5678gh + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *DeleteConnectionInput) SetConnectionId(v string) *DeleteConnectionInput { + s.ConnectionId = &v + return s +} + +// Container for the parameters to the DeleteInterconnect operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteInterconnectRequest +type DeleteInterconnectInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. + // + // Example: dxcon-abc123 + // + // InterconnectId is a required field + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInterconnectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInterconnectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
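+//
+// A hedged sketch: Validate fails fast when the required field is unset,
+// before any request is made.
+//
+//	err := (&DeleteInterconnectInput{}).Validate()
+//	// err is non-nil: InterconnectId is required.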
+func (s *DeleteInterconnectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInterconnectInput"} + if s.InterconnectId == nil { + invalidParams.Add(request.NewErrParamRequired("InterconnectId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInterconnectId sets the InterconnectId field's value. +func (s *DeleteInterconnectInput) SetInterconnectId(v string) *DeleteInterconnectInput { + s.InterconnectId = &v + return s +} + +// The response received when DeleteInterconnect is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteInterconnectResponse +type DeleteInterconnectOutput struct { + _ struct{} `type:"structure"` + + // State of the interconnect. + // + // * Requested: The initial state of an interconnect. The interconnect stays + // in the requested state until the Letter of Authorization (LOA) is sent + // to the customer. + // + // * Pending: The interconnect has been approved, and is being initialized. + // + // * Available: The network link is up, and the interconnect is ready for + // use. + // + // * Down: The network link is down. + // + // * Deleting: The interconnect is in the process of being deleted. + // + // * Deleted: The interconnect has been deleted. + InterconnectState *string `locationName:"interconnectState" type:"string" enum:"InterconnectState"` +} + +// String returns the string representation +func (s DeleteInterconnectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInterconnectOutput) GoString() string { + return s.String() +} + +// SetInterconnectState sets the InterconnectState field's value. +func (s *DeleteInterconnectOutput) SetInterconnectState(v string) *DeleteInterconnectOutput { + s.InterconnectState = &v + return s +} + +// Container for the parameters to the DeleteLag operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteLagRequest +type DeleteLagInput struct { + _ struct{} `type:"structure"` + + // The ID of the LAG to delete. + // + // Example: dxlag-abc123 + // + // Default: None + // + // LagId is a required field + LagId *string `locationName:"lagId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLagInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLagInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLagInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLagInput"} + if s.LagId == nil { + invalidParams.Add(request.NewErrParamRequired("LagId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLagId sets the LagId field's value. +func (s *DeleteLagInput) SetLagId(v string) *DeleteLagInput { + s.LagId = &v + return s +} + +// Container for the parameters to the DeleteVirtualInterface operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteVirtualInterfaceRequest +type DeleteVirtualInterfaceInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual interface. 
+ // + // Example: dxvif-123dfg56 + // + // Default: None + // + // VirtualInterfaceId is a required field + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualInterfaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualInterfaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVirtualInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualInterfaceInput"} + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *DeleteVirtualInterfaceInput) SetVirtualInterfaceId(v string) *DeleteVirtualInterfaceInput { + s.VirtualInterfaceId = &v + return s +} + +// The response received when DeleteVirtualInterface is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DeleteVirtualInterfaceResponse +type DeleteVirtualInterfaceOutput struct { + _ struct{} `type:"structure"` + + // State of the virtual interface. + // + // * Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface + // is different from the owner of the connection on which it is provisioned, + // then the virtual interface will remain in this state until it is confirmed + // by the virtual interface owner. + // + // * Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface + // can be created. + // + // * Pending: A virtual interface is in this state from the time that it + // is created until the virtual interface is ready to forward traffic. + // + // * Available: A virtual interface that is able to forward traffic. + // + // * Down: A virtual interface that is BGP down. + // + // * Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. + // + // * Deleted: A virtual interface that cannot forward traffic. + // + // * Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted + // by the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` +} + +// String returns the string representation +func (s DeleteVirtualInterfaceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualInterfaceOutput) GoString() string { + return s.String() +} + +// SetVirtualInterfaceState sets the VirtualInterfaceState field's value. +func (s *DeleteVirtualInterfaceOutput) SetVirtualInterfaceState(v string) *DeleteVirtualInterfaceOutput { + s.VirtualInterfaceState = &v + return s +} + +// Container for the parameters to the DescribeConnectionLoa operation. 
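+//
+// A hedged, illustrative sketch (svc is an assumed *DirectConnect client);
+// the returned Loa structure carries the LOA-CFA document bytes:
+//
+//	out, err := svc.DescribeConnectionLoa(&DescribeConnectionLoaInput{
+//		ConnectionId: aws.String("dxcon-fg5678gh"),
+//	})
+//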
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionLoaRequest +type DescribeConnectionLoaInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. This field is also used as the ID type for operations + // that use multiple connection types (LAG, interconnect, and/or connection). + // + // Example: dxcon-fg5678gh + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // A standard media type indicating the content type of the LOA-CFA document. + // Currently, the only supported value is "application/pdf". + // + // Default: application/pdf + LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"` + + // The name of the APN partner or service provider who establishes connectivity + // on your behalf. If you supply this parameter, the LOA-CFA lists the provider + // name alongside your company name as the requester of the cross connect. + // + // Default: None + ProviderName *string `locationName:"providerName" type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionLoaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionLoaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConnectionLoaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionLoaInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *DescribeConnectionLoaInput) SetConnectionId(v string) *DescribeConnectionLoaInput { + s.ConnectionId = &v + return s +} + +// SetLoaContentType sets the LoaContentType field's value. +func (s *DescribeConnectionLoaInput) SetLoaContentType(v string) *DescribeConnectionLoaInput { + s.LoaContentType = &v + return s +} + +// SetProviderName sets the ProviderName field's value. +func (s *DescribeConnectionLoaInput) SetProviderName(v string) *DescribeConnectionLoaInput { + s.ProviderName = &v + return s +} + +// The response received when DescribeConnectionLoa is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionLoaResponse +type DescribeConnectionLoaOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the Letter of Authorization - Connecting Facility + // Assignment (LOA-CFA) for a connection. + Loa *Loa `locationName:"loa" type:"structure"` +} + +// String returns the string representation +func (s DescribeConnectionLoaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionLoaOutput) GoString() string { + return s.String() +} + +// SetLoa sets the Loa field's value. +func (s *DescribeConnectionLoaOutput) SetLoa(v *Loa) *DescribeConnectionLoaOutput { + s.Loa = v + return s +} + +// Container for the parameters to the DescribeConnections operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionsRequest +type DescribeConnectionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. 
+ // This field is also used as the ID type for operations
+ // that use multiple connection types (LAG, interconnect, and/or connection).
+ //
+ // Example: dxcon-fg5678gh
+ //
+ // Default: None
+ ConnectionId *string `locationName:"connectionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeConnectionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConnectionsInput) GoString() string {
+ return s.String()
+}
+
+// SetConnectionId sets the ConnectionId field's value.
+func (s *DescribeConnectionsInput) SetConnectionId(v string) *DescribeConnectionsInput {
+ s.ConnectionId = &v
+ return s
+}
+
+// Container for the parameters to the DescribeConnectionsOnInterconnect operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeConnectionsOnInterconnectRequest
+type DescribeConnectionsOnInterconnectInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the interconnect on which the connections are provisioned.
+ //
+ // Example: dxcon-abc123
+ //
+ // Default: None
+ //
+ // InterconnectId is a required field
+ InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeConnectionsOnInterconnectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeConnectionsOnInterconnectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeConnectionsOnInterconnectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionsOnInterconnectInput"}
+ if s.InterconnectId == nil {
+ invalidParams.Add(request.NewErrParamRequired("InterconnectId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInterconnectId sets the InterconnectId field's value.
+func (s *DescribeConnectionsOnInterconnectInput) SetInterconnectId(v string) *DescribeConnectionsOnInterconnectInput {
+ s.InterconnectId = &v
+ return s
+}
+
+// Container for the parameters to the DescribeHostedConnections operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeHostedConnectionsRequest
+type DescribeHostedConnectionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the interconnect or LAG on which the hosted connections are provisioned.
+ //
+ // Example: dxcon-abc123 or dxlag-abc123
+ //
+ // Default: None
+ //
+ // ConnectionId is a required field
+ ConnectionId *string `locationName:"connectionId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeHostedConnectionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeHostedConnectionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeHostedConnectionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeHostedConnectionsInput"}
+ if s.ConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConnectionId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConnectionId sets the ConnectionId field's value.
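+//
+// Hedged sketch: the same input covers hosted connections on either an
+// interconnect or a LAG, so either ID form is accepted (illustrative only):
+//
+//	input := (&DescribeHostedConnectionsInput{}).SetConnectionId("dxlag-abc123")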
+func (s *DescribeHostedConnectionsInput) SetConnectionId(v string) *DescribeHostedConnectionsInput { + s.ConnectionId = &v + return s +} + +// Container for the parameters to the DescribeInterconnectLoa operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnectLoaRequest +type DescribeInterconnectLoaInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. + // + // Example: dxcon-abc123 + // + // InterconnectId is a required field + InterconnectId *string `locationName:"interconnectId" type:"string" required:"true"` + + // A standard media type indicating the content type of the LOA-CFA document. + // Currently, the only supported value is "application/pdf". + // + // Default: application/pdf + LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"` + + // The name of the service provider who establishes connectivity on your behalf. + // If you supply this parameter, the LOA-CFA lists the provider name alongside + // your company name as the requester of the cross connect. + // + // Default: None + ProviderName *string `locationName:"providerName" type:"string"` +} + +// String returns the string representation +func (s DescribeInterconnectLoaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectLoaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInterconnectLoaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInterconnectLoaInput"} + if s.InterconnectId == nil { + invalidParams.Add(request.NewErrParamRequired("InterconnectId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInterconnectId sets the InterconnectId field's value. +func (s *DescribeInterconnectLoaInput) SetInterconnectId(v string) *DescribeInterconnectLoaInput { + s.InterconnectId = &v + return s +} + +// SetLoaContentType sets the LoaContentType field's value. +func (s *DescribeInterconnectLoaInput) SetLoaContentType(v string) *DescribeInterconnectLoaInput { + s.LoaContentType = &v + return s +} + +// SetProviderName sets the ProviderName field's value. +func (s *DescribeInterconnectLoaInput) SetProviderName(v string) *DescribeInterconnectLoaInput { + s.ProviderName = &v + return s +} + +// The response received when DescribeInterconnectLoa is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnectLoaResponse +type DescribeInterconnectLoaOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the Letter of Authorization - Connecting Facility + // Assignment (LOA-CFA) for a connection. + Loa *Loa `locationName:"loa" type:"structure"` +} + +// String returns the string representation +func (s DescribeInterconnectLoaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectLoaOutput) GoString() string { + return s.String() +} + +// SetLoa sets the Loa field's value. +func (s *DescribeInterconnectLoaOutput) SetLoa(v *Loa) *DescribeInterconnectLoaOutput { + s.Loa = v + return s +} + +// Container for the parameters to the DescribeInterconnects operation. 
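+//
+// Illustrative only (svc is an assumed *DirectConnect client): leaving
+// InterconnectId unset lists all interconnects owned by the account, while
+// setting it narrows the result to a single interconnect.
+//
+//	out, err := svc.DescribeInterconnects(&DescribeInterconnectsInput{})
+//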
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeInterconnectsRequest +type DescribeInterconnectsInput struct { + _ struct{} `type:"structure"` + + // The ID of the interconnect. + // + // Example: dxcon-abc123 + InterconnectId *string `locationName:"interconnectId" type:"string"` +} + +// String returns the string representation +func (s DescribeInterconnectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectsInput) GoString() string { + return s.String() +} + +// SetInterconnectId sets the InterconnectId field's value. +func (s *DescribeInterconnectsInput) SetInterconnectId(v string) *DescribeInterconnectsInput { + s.InterconnectId = &v + return s +} + +// A structure containing a list of interconnects. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Interconnects +type DescribeInterconnectsOutput struct { + _ struct{} `type:"structure"` + + // A list of interconnects. + Interconnects []*Interconnect `locationName:"interconnects" type:"list"` +} + +// String returns the string representation +func (s DescribeInterconnectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInterconnectsOutput) GoString() string { + return s.String() +} + +// SetInterconnects sets the Interconnects field's value. +func (s *DescribeInterconnectsOutput) SetInterconnects(v []*Interconnect) *DescribeInterconnectsOutput { + s.Interconnects = v + return s +} + +// Container for the parameters to the DescribeLags operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLagsRequest +type DescribeLagsInput struct { + _ struct{} `type:"structure"` + + // The ID of the LAG. + // + // Example: dxlag-abc123 + // + // Default: None + LagId *string `locationName:"lagId" type:"string"` +} + +// String returns the string representation +func (s DescribeLagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLagsInput) GoString() string { + return s.String() +} + +// SetLagId sets the LagId field's value. +func (s *DescribeLagsInput) SetLagId(v string) *DescribeLagsInput { + s.LagId = &v + return s +} + +// A structure containing a list of LAGs. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Lags +type DescribeLagsOutput struct { + _ struct{} `type:"structure"` + + // A list of LAGs. + Lags []*Lag `locationName:"lags" type:"list"` +} + +// String returns the string representation +func (s DescribeLagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLagsOutput) GoString() string { + return s.String() +} + +// SetLags sets the Lags field's value. +func (s *DescribeLagsOutput) SetLags(v []*Lag) *DescribeLagsOutput { + s.Lags = v + return s +} + +// Container for the parameters to the DescribeLoa operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLoaRequest +type DescribeLoaInput struct { + _ struct{} `type:"structure"` + + // The ID of a connection, LAG, or interconnect for which to get the LOA-CFA + // information. 
+ // + // Example: dxcon-abc123 or dxlag-abc123 + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // A standard media type indicating the content type of the LOA-CFA document. + // Currently, the only supported value is "application/pdf". + // + // Default: application/pdf + LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"` + + // The name of the service provider who establishes connectivity on your behalf. + // If you supply this parameter, the LOA-CFA lists the provider name alongside + // your company name as the requester of the cross connect. + // + // Default: None + ProviderName *string `locationName:"providerName" type:"string"` +} + +// String returns the string representation +func (s DescribeLoaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLoaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoaInput"} + if s.ConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *DescribeLoaInput) SetConnectionId(v string) *DescribeLoaInput { + s.ConnectionId = &v + return s +} + +// SetLoaContentType sets the LoaContentType field's value. +func (s *DescribeLoaInput) SetLoaContentType(v string) *DescribeLoaInput { + s.LoaContentType = &v + return s +} + +// SetProviderName sets the ProviderName field's value. +func (s *DescribeLoaInput) SetProviderName(v string) *DescribeLoaInput { + s.ProviderName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeLocationsInput +type DescribeLocationsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLocationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationsInput) GoString() string { + return s.String() +} + +// A location is a network facility where AWS Direct Connect routers are available +// to be connected. Generally, these are colocation hubs where many network +// providers have equipment, and where cross connects can be delivered. Locations +// include a name and facility code, and must be provided when creating a connection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Locations +type DescribeLocationsOutput struct { + _ struct{} `type:"structure"` + + // A list of colocation hubs where network providers have equipment. Most regions + // have multiple locations available. + Locations []*Location `locationName:"locations" type:"list"` +} + +// String returns the string representation +func (s DescribeLocationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationsOutput) GoString() string { + return s.String() +} + +// SetLocations sets the Locations field's value. +func (s *DescribeLocationsOutput) SetLocations(v []*Location) *DescribeLocationsOutput { + s.Locations = v + return s +} + +// Container for the parameters to the DescribeTags operation. 
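+//
+// A hedged sketch (svc is an assumed *DirectConnect client; the ARN is a
+// placeholder):
+//
+//	out, err := svc.DescribeTags(&DescribeTagsInput{
+//		ResourceArns: []*string{aws.String("arn:aws:directconnect:...")},
+//	})
+//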
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeTagsRequest +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARNs) of the Direct Connect resources. + // + // ResourceArns is a required field + ResourceArns []*string `locationName:"resourceArns" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} + if s.ResourceArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArns")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *DescribeTagsInput) SetResourceArns(v []*string) *DescribeTagsInput { + s.ResourceArns = v + return s +} + +// The response received when DescribeTags is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeTagsResponse +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + ResourceTags []*ResourceTag `locationName:"resourceTags" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// SetResourceTags sets the ResourceTags field's value. +func (s *DescribeTagsOutput) SetResourceTags(v []*ResourceTag) *DescribeTagsOutput { + s.ResourceTags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeVirtualGatewaysInput +type DescribeVirtualGatewaysInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeVirtualGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewaysInput) GoString() string { + return s.String() +} + +// A structure containing a list of virtual private gateways. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/VirtualGateways +type DescribeVirtualGatewaysOutput struct { + _ struct{} `type:"structure"` + + // A list of virtual private gateways. + VirtualGateways []*VirtualGateway `locationName:"virtualGateways" type:"list"` +} + +// String returns the string representation +func (s DescribeVirtualGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewaysOutput) GoString() string { + return s.String() +} + +// SetVirtualGateways sets the VirtualGateways field's value. +func (s *DescribeVirtualGatewaysOutput) SetVirtualGateways(v []*VirtualGateway) *DescribeVirtualGatewaysOutput { + s.VirtualGateways = v + return s +} + +// Container for the parameters to the DescribeVirtualInterfaces operation. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DescribeVirtualInterfacesRequest +type DescribeVirtualInterfacesInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection. This field is also used as the ID type for operations + // that use multiple connection types (LAG, interconnect, and/or connection). + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // The ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` +} + +// String returns the string representation +func (s DescribeVirtualInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualInterfacesInput) GoString() string { + return s.String() +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *DescribeVirtualInterfacesInput) SetConnectionId(v string) *DescribeVirtualInterfacesInput { + s.ConnectionId = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *DescribeVirtualInterfacesInput) SetVirtualInterfaceId(v string) *DescribeVirtualInterfacesInput { + s.VirtualInterfaceId = &v + return s +} + +// A structure containing a list of virtual interfaces. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/VirtualInterfaces +type DescribeVirtualInterfacesOutput struct { + _ struct{} `type:"structure"` + + // A list of virtual interfaces. + VirtualInterfaces []*VirtualInterface `locationName:"virtualInterfaces" type:"list"` +} + +// String returns the string representation +func (s DescribeVirtualInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualInterfacesOutput) GoString() string { + return s.String() +} + +// SetVirtualInterfaces sets the VirtualInterfaces field's value. +func (s *DescribeVirtualInterfacesOutput) SetVirtualInterfaces(v []*VirtualInterface) *DescribeVirtualInterfacesOutput { + s.VirtualInterfaces = v + return s +} + +// Container for the parameters to the DisassociateConnectionFromLag operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DisassociateConnectionFromLagRequest +type DisassociateConnectionFromLagInput struct { + _ struct{} `type:"structure"` + + // The ID of the connection to disassociate from the LAG. + // + // Example: dxcon-abc123 + // + // Default: None + // + // ConnectionId is a required field + ConnectionId *string `locationName:"connectionId" type:"string" required:"true"` + + // The ID of the LAG. + // + // Example: dxlag-abc123 + // + // Default: None + // + // LagId is a required field + LagId *string `locationName:"lagId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateConnectionFromLagInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateConnectionFromLagInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
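+//
+// Hedged sketch: both ConnectionId and LagId are required, so an empty
+// input fails validation before any request is made.
+//
+//	err := (&DisassociateConnectionFromLagInput{}).Validate()
+//	// err lists ConnectionId and LagId as missing.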
+func (s *DisassociateConnectionFromLagInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateConnectionFromLagInput"}
+ if s.ConnectionId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConnectionId"))
+ }
+ if s.LagId == nil {
+ invalidParams.Add(request.NewErrParamRequired("LagId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConnectionId sets the ConnectionId field's value.
+func (s *DisassociateConnectionFromLagInput) SetConnectionId(v string) *DisassociateConnectionFromLagInput {
+ s.ConnectionId = &v
+ return s
+}
+
+// SetLagId sets the LagId field's value.
+func (s *DisassociateConnectionFromLagInput) SetLagId(v string) *DisassociateConnectionFromLagInput {
+ s.LagId = &v
+ return s
+}
+
+// An interconnect is a connection that can host other connections.
+//
+// Like a standard AWS Direct Connect connection, an interconnect represents
+// the physical connection between an AWS Direct Connect partner's network and
+// a specific Direct Connect location. An AWS Direct Connect partner who owns
+// an interconnect can provision hosted connections on the interconnect for
+// their end customers, thereby providing the end customers with connectivity
+// to AWS services.
+//
+// The resources of the interconnect, including bandwidth and VLAN numbers,
+// are shared by all of the hosted connections on the interconnect, and the
+// owner of the interconnect determines how these resources are assigned.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Interconnect
+type Interconnect struct {
+ _ struct{} `type:"structure"`
+
+ // The Direct Connect endpoint on which the physical connection terminates.
+ AwsDevice *string `locationName:"awsDevice" type:"string"`
+
+ // Bandwidth of the connection.
+ //
+ // Example: 1Gbps
+ //
+ // Default: None
+ Bandwidth *string `locationName:"bandwidth" type:"string"`
+
+ // The ID of the interconnect.
+ //
+ // Example: dxcon-abc123
+ InterconnectId *string `locationName:"interconnectId" type:"string"`
+
+ // The name of the interconnect.
+ //
+ // Example: "1G Interconnect to AWS"
+ InterconnectName *string `locationName:"interconnectName" type:"string"`
+
+ // State of the interconnect.
+ //
+ // * Requested: The initial state of an interconnect. The interconnect stays
+ // in the requested state until the Letter of Authorization (LOA) is sent
+ // to the customer.
+ //
+ // * Pending: The interconnect has been approved, and is being initialized.
+ //
+ // * Available: The network link is up, and the interconnect is ready for
+ // use.
+ //
+ // * Down: The network link is down.
+ //
+ // * Deleting: The interconnect is in the process of being deleted.
+ //
+ // * Deleted: The interconnect has been deleted.
+ InterconnectState *string `locationName:"interconnectState" type:"string" enum:"InterconnectState"`
+
+ // The ID of the LAG.
+ //
+ // Example: dxlag-fg5678gh
+ LagId *string `locationName:"lagId" type:"string"`
+
+ // The time of the most recent call to DescribeInterconnectLoa for this Interconnect.
+ LoaIssueTime *time.Time `locationName:"loaIssueTime" type:"timestamp" timestampFormat:"unix"`
+
+ // Where the connection is located.
+ //
+ // Example: EqSV5
+ //
+ // Default: None
+ Location *string `locationName:"location" type:"string"`
+
+ // The AWS region where the connection is located.
+ //
+ // Example: us-east-1
+ //
+ // Default: None
+ Region *string `locationName:"region" type:"string"`
+}
+
+// String returns the string representation
+func (s Interconnect) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Interconnect) GoString() string {
+ return s.String()
+}
+
+// SetAwsDevice sets the AwsDevice field's value.
+func (s *Interconnect) SetAwsDevice(v string) *Interconnect {
+ s.AwsDevice = &v
+ return s
+}
+
+// SetBandwidth sets the Bandwidth field's value.
+func (s *Interconnect) SetBandwidth(v string) *Interconnect {
+ s.Bandwidth = &v
+ return s
+}
+
+// SetInterconnectId sets the InterconnectId field's value.
+func (s *Interconnect) SetInterconnectId(v string) *Interconnect {
+ s.InterconnectId = &v
+ return s
+}
+
+// SetInterconnectName sets the InterconnectName field's value.
+func (s *Interconnect) SetInterconnectName(v string) *Interconnect {
+ s.InterconnectName = &v
+ return s
+}
+
+// SetInterconnectState sets the InterconnectState field's value.
+func (s *Interconnect) SetInterconnectState(v string) *Interconnect {
+ s.InterconnectState = &v
+ return s
+}
+
+// SetLagId sets the LagId field's value.
+func (s *Interconnect) SetLagId(v string) *Interconnect {
+ s.LagId = &v
+ return s
+}
+
+// SetLoaIssueTime sets the LoaIssueTime field's value.
+func (s *Interconnect) SetLoaIssueTime(v time.Time) *Interconnect {
+ s.LoaIssueTime = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *Interconnect) SetLocation(v string) *Interconnect {
+ s.Location = &v
+ return s
+}
+
+// SetRegion sets the Region field's value.
+func (s *Interconnect) SetRegion(v string) *Interconnect {
+ s.Region = &v
+ return s
+}
+
+// Describes a link aggregation group (LAG). A LAG is a connection that uses
+// the Link Aggregation Control Protocol (LACP) to logically aggregate a bundle
+// of physical connections. Like an interconnect, it can host other connections.
+// All connections in a LAG must terminate on the same physical AWS Direct Connect
+// endpoint, and must be the same bandwidth.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Lag
+type Lag struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the LAG can host other connections.
+ //
+ // This is intended for use by AWS Direct Connect partners only.
+ AllowsHostedConnections *bool `locationName:"allowsHostedConnections" type:"boolean"`
+
+ // The AWS Direct Connect endpoint that hosts the LAG.
+ AwsDevice *string `locationName:"awsDevice" type:"string"`
+
+ // A list of connections bundled by this LAG.
+ Connections []*Connection `locationName:"connections" type:"list"`
+
+ // The individual bandwidth of the physical connections bundled by the LAG.
+ //
+ // Available values: 1Gbps, 10Gbps
+ ConnectionsBandwidth *string `locationName:"connectionsBandwidth" type:"string"`
+
+ // The ID of the LAG.
+ //
+ // Example: dxlag-fg5678gh
+ LagId *string `locationName:"lagId" type:"string"`
+
+ // The name of the LAG.
+ LagName *string `locationName:"lagName" type:"string"`
+
+ // The state of the LAG.
+ //
+ // * Requested: The initial state of a LAG. The LAG stays in the requested
+ // state until the Letter of Authorization (LOA) is available.
+ //
+ // * Pending: The LAG has been approved, and is being initialized.
+ //
+ // * Available: The network link is established, and the LAG is ready for
+ // use.
+ //
+ // * Down: The network link is down.
+ // + // * Deleting: The LAG is in the process of being deleted. + // + // * Deleted: The LAG has been deleted. + LagState *string `locationName:"lagState" type:"string" enum:"LagState"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + // The minimum number of physical connections that must be operational for the + // LAG itself to be operational. If the number of operational connections drops + // below this setting, the LAG state changes to down. This value can help to + // ensure that a LAG is not overutilized if a significant number of its bundled + // connections go down. + MinimumLinks *int64 `locationName:"minimumLinks" type:"integer"` + + // The number of physical connections bundled by the LAG, up to a maximum of + // 10. + NumberOfConnections *int64 `locationName:"numberOfConnections" type:"integer"` + + // The owner of the LAG. + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // The AWS region where the connection is located. + // + // Example: us-east-1 + // + // Default: None + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation +func (s Lag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Lag) GoString() string { + return s.String() +} + +// SetAllowsHostedConnections sets the AllowsHostedConnections field's value. +func (s *Lag) SetAllowsHostedConnections(v bool) *Lag { + s.AllowsHostedConnections = &v + return s +} + +// SetAwsDevice sets the AwsDevice field's value. +func (s *Lag) SetAwsDevice(v string) *Lag { + s.AwsDevice = &v + return s +} + +// SetConnections sets the Connections field's value. +func (s *Lag) SetConnections(v []*Connection) *Lag { + s.Connections = v + return s +} + +// SetConnectionsBandwidth sets the ConnectionsBandwidth field's value. +func (s *Lag) SetConnectionsBandwidth(v string) *Lag { + s.ConnectionsBandwidth = &v + return s +} + +// SetLagId sets the LagId field's value. +func (s *Lag) SetLagId(v string) *Lag { + s.LagId = &v + return s +} + +// SetLagName sets the LagName field's value. +func (s *Lag) SetLagName(v string) *Lag { + s.LagName = &v + return s +} + +// SetLagState sets the LagState field's value. +func (s *Lag) SetLagState(v string) *Lag { + s.LagState = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *Lag) SetLocation(v string) *Lag { + s.Location = &v + return s +} + +// SetMinimumLinks sets the MinimumLinks field's value. +func (s *Lag) SetMinimumLinks(v int64) *Lag { + s.MinimumLinks = &v + return s +} + +// SetNumberOfConnections sets the NumberOfConnections field's value. +func (s *Lag) SetNumberOfConnections(v int64) *Lag { + s.NumberOfConnections = &v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *Lag) SetOwnerAccount(v string) *Lag { + s.OwnerAccount = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *Lag) SetRegion(v string) *Lag { + s.Region = &v + return s +} + +// A structure containing the Letter of Authorization - Connecting Facility +// Assignment (LOA-CFA) for a connection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Loa +type Loa struct { + _ struct{} `type:"structure"` + + // The binary contents of the LOA-CFA document. + // + // LoaContent is automatically base64 encoded/decoded by the SDK. 
+ LoaContent []byte `locationName:"loaContent" type:"blob"` + + // A standard media type indicating the content type of the LOA-CFA document. + // Currently, the only supported value is "application/pdf". + // + // Default: application/pdf + LoaContentType *string `locationName:"loaContentType" type:"string" enum:"LoaContentType"` +} + +// String returns the string representation +func (s Loa) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Loa) GoString() string { + return s.String() +} + +// SetLoaContent sets the LoaContent field's value. +func (s *Loa) SetLoaContent(v []byte) *Loa { + s.LoaContent = v + return s +} + +// SetLoaContentType sets the LoaContentType field's value. +func (s *Loa) SetLoaContentType(v string) *Loa { + s.LoaContentType = &v + return s +} + +// An AWS Direct Connect location where connections and interconnects can be +// requested. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Location +type Location struct { + _ struct{} `type:"structure"` + + // The code used to indicate the AWS Direct Connect location. + LocationCode *string `locationName:"locationCode" type:"string"` + + // The name of the AWS Direct Connect location. The name includes the colocation + // partner name and the physical site of the lit building. + LocationName *string `locationName:"locationName" type:"string"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// SetLocationCode sets the LocationCode field's value. +func (s *Location) SetLocationCode(v string) *Location { + s.LocationCode = &v + return s +} + +// SetLocationName sets the LocationName field's value. +func (s *Location) SetLocationName(v string) *Location { + s.LocationName = &v + return s +} + +// A structure containing information about a new BGP peer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/NewBGPPeer +type NewBGPPeer struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` +} + +// String returns the string representation +func (s NewBGPPeer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewBGPPeer) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *NewBGPPeer) SetAddressFamily(v string) *NewBGPPeer { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. 
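+//
+// Illustrative sketch (values are examples only) of assembling a peer with
+// the chainable setters:
+//
+//	peer := (&NewBGPPeer{}).
+//		SetAddressFamily("ipv4").
+//		SetAsn(65000).
+//		SetAuthKey("asdf34example")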
+func (s *NewBGPPeer) SetAmazonAddress(v string) *NewBGPPeer { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *NewBGPPeer) SetAsn(v int64) *NewBGPPeer { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *NewBGPPeer) SetAuthKey(v string) *NewBGPPeer { + s.AuthKey = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. +func (s *NewBGPPeer) SetCustomerAddress(v string) *NewBGPPeer { + s.CustomerAddress = &v + return s +} + +// A structure containing information about a new private virtual interface. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/NewPrivateVirtualInterface +type NewPrivateVirtualInterface struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + // + // Asn is a required field + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + // + // VirtualGatewayId is a required field + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string" required:"true"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + // + // VirtualInterfaceName is a required field + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + // + // Vlan is a required field + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPrivateVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPrivateVirtualInterface) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPrivateVirtualInterface) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPrivateVirtualInterface"} + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.VirtualGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayId")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. 
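+//
+// A hedged, illustrative sketch covering the four required fields:
+//
+//	vif := (&NewPrivateVirtualInterface{}).
+//		SetAsn(65000).
+//		SetVirtualGatewayId("vgw-123er56").
+//		SetVirtualInterfaceName("My VPC").
+//		SetVlan(101)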
+func (s *NewPrivateVirtualInterface) SetAddressFamily(v string) *NewPrivateVirtualInterface { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. +func (s *NewPrivateVirtualInterface) SetAmazonAddress(v string) *NewPrivateVirtualInterface { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *NewPrivateVirtualInterface) SetAsn(v int64) *NewPrivateVirtualInterface { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *NewPrivateVirtualInterface) SetAuthKey(v string) *NewPrivateVirtualInterface { + s.AuthKey = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. +func (s *NewPrivateVirtualInterface) SetCustomerAddress(v string) *NewPrivateVirtualInterface { + s.CustomerAddress = &v + return s +} + +// SetVirtualGatewayId sets the VirtualGatewayId field's value. +func (s *NewPrivateVirtualInterface) SetVirtualGatewayId(v string) *NewPrivateVirtualInterface { + s.VirtualGatewayId = &v + return s +} + +// SetVirtualInterfaceName sets the VirtualInterfaceName field's value. +func (s *NewPrivateVirtualInterface) SetVirtualInterfaceName(v string) *NewPrivateVirtualInterface { + s.VirtualInterfaceName = &v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *NewPrivateVirtualInterface) SetVlan(v int64) *NewPrivateVirtualInterface { + s.Vlan = &v + return s +} + +// A structure containing information about a private virtual interface that +// will be provisioned on a connection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/NewPrivateVirtualInterfaceAllocation +type NewPrivateVirtualInterfaceAllocation struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + // + // Asn is a required field + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + // + // VirtualInterfaceName is a required field + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + // + // Vlan is a required field + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPrivateVirtualInterfaceAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPrivateVirtualInterfaceAllocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
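+// Editor's note: the generated Validate method for NewPrivateVirtualInterfaceAllocation
+// follows immediately below. As a hedged sketch of how these generated types are
+// typically assembled, each setter returns its receiver, so required fields can
+// be chained and then checked with Validate (which the SDK also runs before
+// sending a request); all field values here are illustrative:
+//
+//    vif := (&directconnect.NewPrivateVirtualInterface{}).
+//        SetVirtualInterfaceName("example-private-vif").
+//        SetVlan(101).
+//        SetAsn(65000).
+//        SetVirtualGatewayId("vgw-123er56")
+//    if err := vif.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming each missing required field.
+//        log.Fatal(err)
+//    }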
+func (s *NewPrivateVirtualInterfaceAllocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPrivateVirtualInterfaceAllocation"} + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetAddressFamily(v string) *NewPrivateVirtualInterfaceAllocation { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetAmazonAddress(v string) *NewPrivateVirtualInterfaceAllocation { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetAsn(v int64) *NewPrivateVirtualInterfaceAllocation { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetAuthKey(v string) *NewPrivateVirtualInterfaceAllocation { + s.AuthKey = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetCustomerAddress(v string) *NewPrivateVirtualInterfaceAllocation { + s.CustomerAddress = &v + return s +} + +// SetVirtualInterfaceName sets the VirtualInterfaceName field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetVirtualInterfaceName(v string) *NewPrivateVirtualInterfaceAllocation { + s.VirtualInterfaceName = &v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *NewPrivateVirtualInterfaceAllocation) SetVlan(v int64) *NewPrivateVirtualInterfaceAllocation { + s.Vlan = &v + return s +} + +// A structure containing information about a new public virtual interface. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/NewPublicVirtualInterface +type NewPublicVirtualInterface struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + // + // Asn is a required field + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + + // The name of the virtual interface assigned by the customer. 
+ // + // Example: "My VPC" + // + // VirtualInterfaceName is a required field + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + // + // Vlan is a required field + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPublicVirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPublicVirtualInterface) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPublicVirtualInterface) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPublicVirtualInterface"} + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *NewPublicVirtualInterface) SetAddressFamily(v string) *NewPublicVirtualInterface { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. +func (s *NewPublicVirtualInterface) SetAmazonAddress(v string) *NewPublicVirtualInterface { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *NewPublicVirtualInterface) SetAsn(v int64) *NewPublicVirtualInterface { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *NewPublicVirtualInterface) SetAuthKey(v string) *NewPublicVirtualInterface { + s.AuthKey = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. +func (s *NewPublicVirtualInterface) SetCustomerAddress(v string) *NewPublicVirtualInterface { + s.CustomerAddress = &v + return s +} + +// SetRouteFilterPrefixes sets the RouteFilterPrefixes field's value. +func (s *NewPublicVirtualInterface) SetRouteFilterPrefixes(v []*RouteFilterPrefix) *NewPublicVirtualInterface { + s.RouteFilterPrefixes = v + return s +} + +// SetVirtualInterfaceName sets the VirtualInterfaceName field's value. +func (s *NewPublicVirtualInterface) SetVirtualInterfaceName(v string) *NewPublicVirtualInterface { + s.VirtualInterfaceName = &v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *NewPublicVirtualInterface) SetVlan(v int64) *NewPublicVirtualInterface { + s.Vlan = &v + return s +} + +// A structure containing information about a public virtual interface that +// will be provisioned on a connection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/NewPublicVirtualInterfaceAllocation +type NewPublicVirtualInterfaceAllocation struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. 
+ // + // Example: 65000 + // + // Asn is a required field + Asn *int64 `locationName:"asn" type:"integer" required:"true"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + // + // VirtualInterfaceName is a required field + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` + + // The VLAN ID. + // + // Example: 101 + // + // Vlan is a required field + Vlan *int64 `locationName:"vlan" type:"integer" required:"true"` +} + +// String returns the string representation +func (s NewPublicVirtualInterfaceAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NewPublicVirtualInterfaceAllocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NewPublicVirtualInterfaceAllocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NewPublicVirtualInterfaceAllocation"} + if s.Asn == nil { + invalidParams.Add(request.NewErrParamRequired("Asn")) + } + if s.VirtualInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceName")) + } + if s.Vlan == nil { + invalidParams.Add(request.NewErrParamRequired("Vlan")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetAddressFamily(v string) *NewPublicVirtualInterfaceAllocation { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetAmazonAddress(v string) *NewPublicVirtualInterfaceAllocation { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetAsn(v int64) *NewPublicVirtualInterfaceAllocation { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetAuthKey(v string) *NewPublicVirtualInterfaceAllocation { + s.AuthKey = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetCustomerAddress(v string) *NewPublicVirtualInterfaceAllocation { + s.CustomerAddress = &v + return s +} + +// SetRouteFilterPrefixes sets the RouteFilterPrefixes field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetRouteFilterPrefixes(v []*RouteFilterPrefix) *NewPublicVirtualInterfaceAllocation { + s.RouteFilterPrefixes = v + return s +} + +// SetVirtualInterfaceName sets the VirtualInterfaceName field's value. +func (s *NewPublicVirtualInterfaceAllocation) SetVirtualInterfaceName(v string) *NewPublicVirtualInterfaceAllocation { + s.VirtualInterfaceName = &v + return s +} + +// SetVlan sets the Vlan field's value. 
+func (s *NewPublicVirtualInterfaceAllocation) SetVlan(v int64) *NewPublicVirtualInterfaceAllocation { + s.Vlan = &v + return s +} + +// The tags associated with a Direct Connect resource. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ResourceTag +type ResourceTag struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Direct Connect resource. + ResourceArn *string `locationName:"resourceArn" type:"string"` + + // The tags. + Tags []*Tag `locationName:"tags" min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceTag) GoString() string { + return s.String() +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ResourceTag) SetResourceArn(v string) *ResourceTag { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ResourceTag) SetTags(v []*Tag) *ResourceTag { + s.Tags = v + return s +} + +// A route filter prefix that the customer can advertise through Border Gateway +// Protocol (BGP) over a public virtual interface. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/RouteFilterPrefix +type RouteFilterPrefix struct { + _ struct{} `type:"structure"` + + // CIDR notation for the advertised route. Multiple routes are separated by + // commas. + // + // IPv6 CIDRs must be at least a /64 or shorter + // + // Example: 10.10.10.0/24,10.10.11.0/24,2001:db8::/64 + Cidr *string `locationName:"cidr" type:"string"` +} + +// String returns the string representation +func (s RouteFilterPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteFilterPrefix) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *RouteFilterPrefix) SetCidr(v string) *RouteFilterPrefix { + s.Cidr = &v + return s +} + +// Information about a tag. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Container for the parameters to the TagResource operation. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/TagResourceRequest +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Direct Connect resource. + // + // Example: arn:aws:directconnect:us-east-1:123456789012:dxcon/dxcon-fg5678gh + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The list of tags to add. + // + // Tags is a required field + Tags []*Tag `locationName:"tags" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +// The response received when TagResource is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/TagResourceResponse +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the UntagResource operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UntagResourceRequest +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Direct Connect resource. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + + // The list of tag keys to remove. + // + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
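+// Editor's note: the generated Validate method for UntagResourceInput follows
+// immediately below. A minimal, hedged sketch of the TagResource operation using
+// the TagResourceInput type defined above, assuming an existing session; the ARN
+// and tag values are illustrative:
+//
+//    svc := directconnect.New(session.Must(session.NewSession()))
+//    _, err := svc.TagResource(&directconnect.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:directconnect:us-east-1:123456789012:dxcon/dxcon-fg5678gh"),
+//        Tags: []*directconnect.Tag{
+//            {Key: aws.String("Environment"), Value: aws.String("staging")},
+//        },
+//    })
+//    if err != nil {
+//        // The error code may match one of the constants in this package's
+//        // errors.go, e.g. ErrCodeTooManyTagsException.
+//        log.Fatal(err)
+//    }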
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +// The response received when UntagResource is called. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UntagResourceResponse +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// Container for the parameters to the UpdateLag operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/UpdateLagRequest +type UpdateLagInput struct { + _ struct{} `type:"structure"` + + // The ID of the LAG to update. + // + // Example: dxlag-abc123 + // + // Default: None + // + // LagId is a required field + LagId *string `locationName:"lagId" type:"string" required:"true"` + + // The name for the LAG. + // + // Example: "3x10G LAG to AWS" + // + // Default: None + LagName *string `locationName:"lagName" type:"string"` + + // The minimum number of physical connections that must be operational for the + // LAG itself to be operational. + // + // Default: None + MinimumLinks *int64 `locationName:"minimumLinks" type:"integer"` +} + +// String returns the string representation +func (s UpdateLagInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLagInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLagInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLagInput"} + if s.LagId == nil { + invalidParams.Add(request.NewErrParamRequired("LagId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLagId sets the LagId field's value. +func (s *UpdateLagInput) SetLagId(v string) *UpdateLagInput { + s.LagId = &v + return s +} + +// SetLagName sets the LagName field's value. +func (s *UpdateLagInput) SetLagName(v string) *UpdateLagInput { + s.LagName = &v + return s +} + +// SetMinimumLinks sets the MinimumLinks field's value. +func (s *UpdateLagInput) SetMinimumLinks(v int64) *UpdateLagInput { + s.MinimumLinks = &v + return s +} + +// You can create one or more AWS Direct Connect private virtual interfaces +// linking to your virtual private gateway. +// +// Virtual private gateways can be managed using the Amazon Virtual Private +// Cloud (Amazon VPC) console or the Amazon EC2 CreateVpnGateway action (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-CreateVpnGateway.html). 
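+// Editor's note: the doc comment for the VirtualGateway type continues
+// immediately below. A short, hedged sketch of the UpdateLag operation built
+// from the UpdateLagInput type defined above; svc is assumed to be a
+// *directconnect.DirectConnect client, and the LAG ID and values are
+// illustrative:
+//
+//    _, err := svc.UpdateLag(&directconnect.UpdateLagInput{
+//        LagId:        aws.String("dxlag-abc123"),
+//        LagName:      aws.String("3x10G LAG to AWS"),
+//        MinimumLinks: aws.Int64(2),
+//    })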
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/VirtualGateway +type VirtualGateway struct { + _ struct{} `type:"structure"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` + + // State of the virtual private gateway. + // + // * Pending: This is the initial state after calling CreateVpnGateway. + // + // * Available: Ready for use by a private virtual interface. + // + // * Deleting: This is the initial state after calling DeleteVpnGateway. + // + // * Deleted: In this state, a private virtual interface is unable to send + // traffic over this gateway. + VirtualGatewayState *string `locationName:"virtualGatewayState" type:"string"` +} + +// String returns the string representation +func (s VirtualGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualGateway) GoString() string { + return s.String() +} + +// SetVirtualGatewayId sets the VirtualGatewayId field's value. +func (s *VirtualGateway) SetVirtualGatewayId(v string) *VirtualGateway { + s.VirtualGatewayId = &v + return s +} + +// SetVirtualGatewayState sets the VirtualGatewayState field's value. +func (s *VirtualGateway) SetVirtualGatewayState(v string) *VirtualGateway { + s.VirtualGatewayState = &v + return s +} + +// A virtual interface (VLAN) transmits the traffic between the AWS Direct Connect +// location and the customer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/VirtualInterface +type VirtualInterface struct { + _ struct{} `type:"structure"` + + // Indicates the address family for the BGP peer. + // + // * ipv4: IPv4 address family + // + // * ipv6: IPv6 address family + AddressFamily *string `locationName:"addressFamily" type:"string" enum:"AddressFamily"` + + // IP address assigned to the Amazon interface. + // + // Example: 192.168.1.1/30 or 2001:db8::1/125 + AmazonAddress *string `locationName:"amazonAddress" type:"string"` + + // The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration. + // + // Example: 65000 + Asn *int64 `locationName:"asn" type:"integer"` + + // The authentication key for BGP configuration. + // + // Example: asdf34example + AuthKey *string `locationName:"authKey" type:"string"` + + // A list of the BGP peers configured on this virtual interface. + BgpPeers []*BGPPeer `locationName:"bgpPeers" type:"list"` + + // The ID of the connection. This field is also used as the ID type for operations + // that use multiple connection types (LAG, interconnect, and/or connection). + // + // Example: dxcon-fg5678gh + // + // Default: None + ConnectionId *string `locationName:"connectionId" type:"string"` + + // IP address assigned to the customer interface. + // + // Example: 192.168.1.2/30 or 2001:db8::2/125 + CustomerAddress *string `locationName:"customerAddress" type:"string"` + + // Information for generating the customer router configuration. + CustomerRouterConfig *string `locationName:"customerRouterConfig" type:"string"` + + // Where the connection is located. + // + // Example: EqSV5 + // + // Default: None + Location *string `locationName:"location" type:"string"` + + // The AWS account that will own the new virtual interface. 
+ OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // A list of routes to be advertised to the AWS network in this region (public + // virtual interface). + RouteFilterPrefixes []*RouteFilterPrefix `locationName:"routeFilterPrefixes" type:"list"` + + // The ID of the virtual private gateway to a VPC. This only applies to private + // virtual interfaces. + // + // Example: vgw-123er56 + VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` + + // The ID of the virtual interface. + // + // Example: dxvif-123dfg56 + // + // Default: None + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` + + // The name of the virtual interface assigned by the customer. + // + // Example: "My VPC" + VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` + + // State of the virtual interface. + // + // * Confirming: The creation of the virtual interface is pending confirmation + // from the virtual interface owner. If the owner of the virtual interface + // is different from the owner of the connection on which it is provisioned, + // then the virtual interface will remain in this state until it is confirmed + // by the virtual interface owner. + // + // * Verifying: This state only applies to public virtual interfaces. Each + // public virtual interface needs validation before the virtual interface + // can be created. + // + // * Pending: A virtual interface is in this state from the time that it + // is created until the virtual interface is ready to forward traffic. + // + // * Available: A virtual interface that is able to forward traffic. + // + // * Down: A virtual interface that is BGP down. + // + // * Deleting: A virtual interface is in this state immediately after calling + // DeleteVirtualInterface until it can no longer forward traffic. + // + // * Deleted: A virtual interface that cannot forward traffic. + // + // * Rejected: The virtual interface owner has declined creation of the virtual + // interface. If a virtual interface in the 'Confirming' state is deleted + // by the virtual interface owner, the virtual interface will enter the 'Rejected' + // state. + VirtualInterfaceState *string `locationName:"virtualInterfaceState" type:"string" enum:"VirtualInterfaceState"` + + // The type of virtual interface. + // + // Example: private (Amazon VPC) or public (Amazon S3, Amazon DynamoDB, and + // so on.) + VirtualInterfaceType *string `locationName:"virtualInterfaceType" type:"string"` + + // The VLAN ID. + // + // Example: 101 + Vlan *int64 `locationName:"vlan" type:"integer"` +} + +// String returns the string representation +func (s VirtualInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualInterface) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *VirtualInterface) SetAddressFamily(v string) *VirtualInterface { + s.AddressFamily = &v + return s +} + +// SetAmazonAddress sets the AmazonAddress field's value. +func (s *VirtualInterface) SetAmazonAddress(v string) *VirtualInterface { + s.AmazonAddress = &v + return s +} + +// SetAsn sets the Asn field's value. +func (s *VirtualInterface) SetAsn(v int64) *VirtualInterface { + s.Asn = &v + return s +} + +// SetAuthKey sets the AuthKey field's value. +func (s *VirtualInterface) SetAuthKey(v string) *VirtualInterface { + s.AuthKey = &v + return s +} + +// SetBgpPeers sets the BgpPeers field's value. 
+func (s *VirtualInterface) SetBgpPeers(v []*BGPPeer) *VirtualInterface { + s.BgpPeers = v + return s +} + +// SetConnectionId sets the ConnectionId field's value. +func (s *VirtualInterface) SetConnectionId(v string) *VirtualInterface { + s.ConnectionId = &v + return s +} + +// SetCustomerAddress sets the CustomerAddress field's value. +func (s *VirtualInterface) SetCustomerAddress(v string) *VirtualInterface { + s.CustomerAddress = &v + return s +} + +// SetCustomerRouterConfig sets the CustomerRouterConfig field's value. +func (s *VirtualInterface) SetCustomerRouterConfig(v string) *VirtualInterface { + s.CustomerRouterConfig = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *VirtualInterface) SetLocation(v string) *VirtualInterface { + s.Location = &v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *VirtualInterface) SetOwnerAccount(v string) *VirtualInterface { + s.OwnerAccount = &v + return s +} + +// SetRouteFilterPrefixes sets the RouteFilterPrefixes field's value. +func (s *VirtualInterface) SetRouteFilterPrefixes(v []*RouteFilterPrefix) *VirtualInterface { + s.RouteFilterPrefixes = v + return s +} + +// SetVirtualGatewayId sets the VirtualGatewayId field's value. +func (s *VirtualInterface) SetVirtualGatewayId(v string) *VirtualInterface { + s.VirtualGatewayId = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *VirtualInterface) SetVirtualInterfaceId(v string) *VirtualInterface { + s.VirtualInterfaceId = &v + return s +} + +// SetVirtualInterfaceName sets the VirtualInterfaceName field's value. +func (s *VirtualInterface) SetVirtualInterfaceName(v string) *VirtualInterface { + s.VirtualInterfaceName = &v + return s +} + +// SetVirtualInterfaceState sets the VirtualInterfaceState field's value. +func (s *VirtualInterface) SetVirtualInterfaceState(v string) *VirtualInterface { + s.VirtualInterfaceState = &v + return s +} + +// SetVirtualInterfaceType sets the VirtualInterfaceType field's value. +func (s *VirtualInterface) SetVirtualInterfaceType(v string) *VirtualInterface { + s.VirtualInterfaceType = &v + return s +} + +// SetVlan sets the Vlan field's value. +func (s *VirtualInterface) SetVlan(v int64) *VirtualInterface { + s.Vlan = &v + return s +} + +// Indicates the address family for the BGP peer. +// +// * ipv4: IPv4 address family +// +// * ipv6: IPv6 address family +const ( + // AddressFamilyIpv4 is a AddressFamily enum value + AddressFamilyIpv4 = "ipv4" + + // AddressFamilyIpv6 is a AddressFamily enum value + AddressFamilyIpv6 = "ipv6" +) + +// The state of the BGP peer. +// +// * Verifying: The BGP peering addresses or ASN require validation before +// the BGP peer can be created. This state only applies to BGP peers on a +// public virtual interface. +// +// * Pending: The BGP peer has been created, and is in this state until it +// is ready to be established. +// +// * Available: The BGP peer can be established. +// +// * Deleting: The BGP peer is in the process of being deleted. +// +// * Deleted: The BGP peer has been deleted and cannot be established. 
+const ( + // BGPPeerStateVerifying is a BGPPeerState enum value + BGPPeerStateVerifying = "verifying" + + // BGPPeerStatePending is a BGPPeerState enum value + BGPPeerStatePending = "pending" + + // BGPPeerStateAvailable is a BGPPeerState enum value + BGPPeerStateAvailable = "available" + + // BGPPeerStateDeleting is a BGPPeerState enum value + BGPPeerStateDeleting = "deleting" + + // BGPPeerStateDeleted is a BGPPeerState enum value + BGPPeerStateDeleted = "deleted" +) + +// The Up/Down state of the BGP peer. +// +// * Up: The BGP peer is established. +// +// * Down: The BGP peer is down. +const ( + // BGPStatusUp is a BGPStatus enum value + BGPStatusUp = "up" + + // BGPStatusDown is a BGPStatus enum value + BGPStatusDown = "down" +) + +// State of the connection. +// +// * Ordering: The initial state of a hosted connection provisioned on an +// interconnect. The connection stays in the ordering state until the owner +// of the hosted connection confirms or declines the connection order. +// +// * Requested: The initial state of a standard connection. The connection +// stays in the requested state until the Letter of Authorization (LOA) is +// sent to the customer. +// +// * Pending: The connection has been approved, and is being initialized. +// +// * Available: The network link is up, and the connection is ready for use. +// +// * Down: The network link is down. +// +// * Deleting: The connection is in the process of being deleted. +// +// * Deleted: The connection has been deleted. +// +// * Rejected: A hosted connection in the 'Ordering' state will enter the +// 'Rejected' state if it is deleted by the end customer. +const ( + // ConnectionStateOrdering is a ConnectionState enum value + ConnectionStateOrdering = "ordering" + + // ConnectionStateRequested is a ConnectionState enum value + ConnectionStateRequested = "requested" + + // ConnectionStatePending is a ConnectionState enum value + ConnectionStatePending = "pending" + + // ConnectionStateAvailable is a ConnectionState enum value + ConnectionStateAvailable = "available" + + // ConnectionStateDown is a ConnectionState enum value + ConnectionStateDown = "down" + + // ConnectionStateDeleting is a ConnectionState enum value + ConnectionStateDeleting = "deleting" + + // ConnectionStateDeleted is a ConnectionState enum value + ConnectionStateDeleted = "deleted" + + // ConnectionStateRejected is a ConnectionState enum value + ConnectionStateRejected = "rejected" +) + +// State of the interconnect. +// +// * Requested: The initial state of an interconnect. The interconnect stays +// in the requested state until the Letter of Authorization (LOA) is sent +// to the customer. +// +// * Pending: The interconnect has been approved, and is being initialized. +// +// * Available: The network link is up, and the interconnect is ready for +// use. +// +// * Down: The network link is down. +// +// * Deleting: The interconnect is in the process of being deleted. +// +// * Deleted: The interconnect has been deleted. 
+const ( + // InterconnectStateRequested is a InterconnectState enum value + InterconnectStateRequested = "requested" + + // InterconnectStatePending is a InterconnectState enum value + InterconnectStatePending = "pending" + + // InterconnectStateAvailable is a InterconnectState enum value + InterconnectStateAvailable = "available" + + // InterconnectStateDown is a InterconnectState enum value + InterconnectStateDown = "down" + + // InterconnectStateDeleting is a InterconnectState enum value + InterconnectStateDeleting = "deleting" + + // InterconnectStateDeleted is a InterconnectState enum value + InterconnectStateDeleted = "deleted" +) + +// The state of the LAG. +// +// * Requested: The initial state of a LAG. The LAG stays in the requested +// state until the Letter of Authorization (LOA) is available. +// +// * Pending: The LAG has been approved, and is being initialized. +// +// * Available: The network link is established, and the LAG is ready for +// use. +// +// * Down: The network link is down. +// +// * Deleting: The LAG is in the process of being deleted. +// +// * Deleted: The LAG has been deleted. +const ( + // LagStateRequested is a LagState enum value + LagStateRequested = "requested" + + // LagStatePending is a LagState enum value + LagStatePending = "pending" + + // LagStateAvailable is a LagState enum value + LagStateAvailable = "available" + + // LagStateDown is a LagState enum value + LagStateDown = "down" + + // LagStateDeleting is a LagState enum value + LagStateDeleting = "deleting" + + // LagStateDeleted is a LagState enum value + LagStateDeleted = "deleted" +) + +// A standard media type indicating the content type of the LOA-CFA document. +// Currently, the only supported value is "application/pdf". +// +// Default: application/pdf +const ( + // LoaContentTypeApplicationPdf is a LoaContentType enum value + LoaContentTypeApplicationPdf = "application/pdf" +) + +// State of the virtual interface. +// +// * Confirming: The creation of the virtual interface is pending confirmation +// from the virtual interface owner. If the owner of the virtual interface +// is different from the owner of the connection on which it is provisioned, +// then the virtual interface will remain in this state until it is confirmed +// by the virtual interface owner. +// +// * Verifying: This state only applies to public virtual interfaces. Each +// public virtual interface needs validation before the virtual interface +// can be created. +// +// * Pending: A virtual interface is in this state from the time that it +// is created until the virtual interface is ready to forward traffic. +// +// * Available: A virtual interface that is able to forward traffic. +// +// * Down: A virtual interface that is BGP down. +// +// * Deleting: A virtual interface is in this state immediately after calling +// DeleteVirtualInterface until it can no longer forward traffic. +// +// * Deleted: A virtual interface that cannot forward traffic. +// +// * Rejected: The virtual interface owner has declined creation of the virtual +// interface. If a virtual interface in the 'Confirming' state is deleted +// by the virtual interface owner, the virtual interface will enter the 'Rejected' +// state. 
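+// Editor's note: the VirtualInterfaceState constants themselves follow
+// immediately below. A hedged sketch of how callers (for example, a Terraform
+// resource waiting on provisioning) typically compare a described interface
+// against these enum values; vi is assumed to be a *directconnect.VirtualInterface:
+//
+//    switch aws.StringValue(vi.VirtualInterfaceState) {
+//    case directconnect.VirtualInterfaceStateAvailable:
+//        // The interface is ready to forward traffic.
+//    case directconnect.VirtualInterfaceStateDeleted, directconnect.VirtualInterfaceStateRejected:
+//        // Terminal states: the interface will never forward traffic.
+//    }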
+const (
+	// VirtualInterfaceStateConfirming is a VirtualInterfaceState enum value
+	VirtualInterfaceStateConfirming = "confirming"
+
+	// VirtualInterfaceStateVerifying is a VirtualInterfaceState enum value
+	VirtualInterfaceStateVerifying = "verifying"
+
+	// VirtualInterfaceStatePending is a VirtualInterfaceState enum value
+	VirtualInterfaceStatePending = "pending"
+
+	// VirtualInterfaceStateAvailable is a VirtualInterfaceState enum value
+	VirtualInterfaceStateAvailable = "available"
+
+	// VirtualInterfaceStateDown is a VirtualInterfaceState enum value
+	VirtualInterfaceStateDown = "down"
+
+	// VirtualInterfaceStateDeleting is a VirtualInterfaceState enum value
+	VirtualInterfaceStateDeleting = "deleting"
+
+	// VirtualInterfaceStateDeleted is a VirtualInterfaceState enum value
+	VirtualInterfaceStateDeleted = "deleted"
+
+	// VirtualInterfaceStateRejected is a VirtualInterfaceState enum value
+	VirtualInterfaceStateRejected = "rejected"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/doc.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/doc.go
new file mode 100644
index 00000000000..7cd3ca61886
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/doc.go
@@ -0,0 +1,39 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package directconnect provides the client and types for making API
+// requests to AWS Direct Connect.
+//
+// AWS Direct Connect links your internal network to an AWS Direct Connect location
+// over a standard 1 gigabit or 10 gigabit Ethernet fiber-optic cable. One end
+// of the cable is connected to your router, the other to an AWS Direct Connect
+// router. With this connection in place, you can create virtual interfaces
+// directly to the AWS cloud (for example, to Amazon Elastic Compute Cloud (Amazon
+// EC2) and Amazon Simple Storage Service (Amazon S3)) and to Amazon Virtual
+// Private Cloud (Amazon VPC), bypassing Internet service providers in your
+// network path. An AWS Direct Connect location provides access to AWS in the
+// region it is associated with, as well as access to other US regions. For
+// example, you can provision a single connection to any AWS Direct Connect
+// location in the US and use it to access public AWS services in all US Regions
+// and AWS GovCloud (US).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25 for more information on this service.
+//
+// See directconnect package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/directconnect/
+//
+// Using the Client
+//
+// To contact AWS Direct Connect with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Direct Connect client DirectConnect for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/directconnect/#New
+package directconnect
diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/errors.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/errors.go
new file mode 100644
index 00000000000..cbf9bf2f536
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/errors.go
@@ -0,0 +1,33 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package directconnect
+
+const (
+
+	// ErrCodeClientException for service response error code
+	// "ClientException".
+	//
+	// The API was called with invalid parameters. The error message will contain
+	// additional details about the cause.
+	ErrCodeClientException = "ClientException"
+
+	// ErrCodeDuplicateTagKeysException for service response error code
+	// "DuplicateTagKeysException".
+	//
+	// A tag key was specified more than once.
+	ErrCodeDuplicateTagKeysException = "DuplicateTagKeysException"
+
+	// ErrCodeServerException for service response error code
+	// "ServerException".
+	//
+	// A server-side error occurred during the API call. The error message will
+	// contain additional details about the cause.
+	ErrCodeServerException = "ServerException"
+
+	// ErrCodeTooManyTagsException for service response error code
+	// "TooManyTagsException".
+	//
+	// You have reached the limit on the number of tags that can be assigned to
+	// a Direct Connect resource.
+	ErrCodeTooManyTagsException = "TooManyTagsException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go
new file mode 100644
index 00000000000..1798ae11a1a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package directconnect
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// DirectConnect provides the API operation methods for making requests to
+// AWS Direct Connect. See this package's package overview docs
+// for details on the service.
+//
+// DirectConnect methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type DirectConnect struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "directconnect" // Service endpoint prefix API calls are made to.
+	EndpointsID = ServiceName     // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the DirectConnect client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a DirectConnect client from just a session.
+//     svc := directconnect.New(mySession)
+//
+//     // Create a DirectConnect client with additional configuration
+//     svc := directconnect.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectConnect {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectConnect { + svc := &DirectConnect{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-10-25", + JSONVersion: "1.1", + TargetPrefix: "OvertureService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DirectConnect operation and runs any +// custom request initialization. +func (c *DirectConnect) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 64c1aac0df2..ed9b326c0c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -15403,9 +15403,10 @@ func (c *EC2) GetReservedInstancesExchangeQuoteRequest(input *GetReservedInstanc // GetReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud. // -// Returns details about the values and term of your specified Convertible Reserved -// Instances. When a target configuration is specified, it returns information -// about whether the exchange is valid and can be performed. +// Returns a quote and exchange information for exchanging one or more specified +// Convertible Reserved Instances for a new Convertible Reserved Instance. If +// the exchange cannot be performed, the reason is returned in the response. +// Use AcceptReservedInstancesExchangeQuote to perform the exchange. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16539,9 +16540,9 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput // ModifyReservedInstances API operation for Amazon Elastic Compute Cloud. // // Modifies the Availability Zone, instance count, instance type, or network -// platform (EC2-Classic or EC2-VPC) of your Standard Reserved Instances. The -// Reserved Instances to be modified must be identical, except for Availability -// Zone, network platform, and instance type. +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. // // For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) // in the Amazon Elastic Compute Cloud User Guide. 
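// Editor's note: a hedged sketch of the quote-then-accept flow described in the
// updated GetReservedInstancesExchangeQuote documentation above; svc is assumed
// to be an *ec2.EC2 client and the Reserved Instance ID is illustrative:
//
//    quote, err := svc.GetReservedInstancesExchangeQuote(&ec2.GetReservedInstancesExchangeQuoteInput{
//        ReservedInstanceIds: []*string{aws.String("11111111-2222-3333-4444-555555555555")},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    if aws.BoolValue(quote.IsValidExchange) {
//        _, err = svc.AcceptReservedInstancesExchangeQuote(&ec2.AcceptReservedInstancesExchangeQuoteInput{
//            ReservedInstanceIds: []*string{aws.String("11111111-2222-3333-4444-555555555555")},
//        })
//    }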
@@ -17267,6 +17268,89 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsWithContext(ctx aws.Context, inpu
 	return out, req.Send()
 }
 
+const opModifyVpcTenancy = "ModifyVpcTenancy"
+
+// ModifyVpcTenancyRequest generates a "aws/request.Request" representing the
+// client's request for the ModifyVpcTenancy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ModifyVpcTenancy for more information on using the ModifyVpcTenancy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the ModifyVpcTenancyRequest method.
+//    req, resp := client.ModifyVpcTenancyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy
+func (c *EC2) ModifyVpcTenancyRequest(input *ModifyVpcTenancyInput) (req *request.Request, output *ModifyVpcTenancyOutput) {
+	op := &request.Operation{
+		Name:       opModifyVpcTenancy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &ModifyVpcTenancyInput{}
+	}
+
+	output = &ModifyVpcTenancyOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ModifyVpcTenancy API operation for Amazon Elastic Compute Cloud.
+//
+// Modifies the instance tenancy attribute of the specified VPC. You can change
+// the instance tenancy attribute of a VPC to default only. You cannot change
+// the instance tenancy attribute to dedicated.
+//
+// After you modify the tenancy of the VPC, any new instances that you launch
+// into the VPC have a tenancy of default, unless you specify otherwise during
+// launch. The tenancy of any existing instances in the VPC is not affected.
+//
+// For more information about Dedicated Instances, see Dedicated Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html)
+// in the Amazon Elastic Compute Cloud User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation ModifyVpcTenancy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy
+func (c *EC2) ModifyVpcTenancy(input *ModifyVpcTenancyInput) (*ModifyVpcTenancyOutput, error) {
+	req, out := c.ModifyVpcTenancyRequest(input)
+	return out, req.Send()
+}
+
+// ModifyVpcTenancyWithContext is the same as ModifyVpcTenancy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ModifyVpcTenancy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) ModifyVpcTenancyWithContext(ctx aws.Context, input *ModifyVpcTenancyInput, opts ...request.Option) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -20315,14 +20399,14 @@ type AcceptReservedInstancesExchangeQuoteInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The IDs of the Convertible Reserved Instances to exchange for other Convertible - // Reserved Instances of the same or higher value. + // The IDs of the Convertible Reserved Instances to exchange for another Convertible + // Reserved Instance of the same or higher value. // // ReservedInstanceIds is a required field ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` - // The configurations of the Convertible Reserved Instance offerings that you - // are purchasing in this exchange. + // The configuration of the target Convertible Reserved Instance to exchange + // for your current Convertible Reserved Instances. TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` } @@ -35140,6 +35224,14 @@ type DescribeSecurityGroupsInput struct { // // Default: Describes all your security groups. GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another request with the returned NextToken value. + // This value can be between 5 and 1000. + MaxResults *int64 `type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` } // String returns the string representation @@ -35176,11 +35268,27 @@ func (s *DescribeSecurityGroupsInput) SetGroupNames(v []*string) *DescribeSecuri return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSecurityGroupsInput) SetMaxResults(v int64) *DescribeSecurityGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsInput) SetNextToken(v string) *DescribeSecurityGroupsInput { + s.NextToken = &v + return s +} + // Contains the output of DescribeSecurityGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupsResult type DescribeSecurityGroupsOutput struct { _ struct{} `type:"structure"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + // Information about one or more security groups. SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` } @@ -35195,6 +35303,12 @@ func (s DescribeSecurityGroupsOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsOutput) SetNextToken(v string) *DescribeSecurityGroupsOutput { + s.NextToken = &v + return s +} + // SetSecurityGroups sets the SecurityGroups field's value. 
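+// Editor's note: the SetSecurityGroups setter continues immediately below. A
+// hedged sketch of driving the MaxResults/NextToken pagination added to
+// DescribeSecurityGroups in this change; svc is assumed to be an *ec2.EC2 client:
+//
+//    input := &ec2.DescribeSecurityGroupsInput{MaxResults: aws.Int64(100)}
+//    var groups []*ec2.SecurityGroup
+//    for {
+//        page, err := svc.DescribeSecurityGroups(input)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        groups = append(groups, page.SecurityGroups...)
+//        if page.NextToken == nil {
+//            break
+//        }
+//        input.NextToken = page.NextToken
+//    }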
func (s *DescribeSecurityGroupsOutput) SetSecurityGroups(v []*SecurityGroup) *DescribeSecurityGroupsOutput { s.SecurityGroups = v @@ -41051,7 +41165,7 @@ type GetReservedInstancesExchangeQuoteInput struct { // ReservedInstanceIds is a required field ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` - // The configuration requirements of the Convertible Reserved Instances to exchange + // The configuration of the target Convertible Reserved Instance to exchange // for your current Convertible Reserved Instances. TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` } @@ -45219,9 +45333,6 @@ type LaunchSpecification struct { AddressingType *string `locationName:"addressingType" type:"string"` // One or more block device mapping entries. - // - // Although you can specify encrypted EBS volumes in this block device mapping - // for your Spot Instances, these volumes are not encrypted. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -47316,6 +47427,97 @@ func (s *ModifyVpcPeeringConnectionOptionsOutput) SetRequesterPeeringConnectionO return s } +// Contains the parameters for ModifyVpcTenancy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyRequest +type ModifyVpcTenancyInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance tenancy attribute for the VPC. + // + // InstanceTenancy is a required field + InstanceTenancy *string `type:"string" required:"true" enum:"VpcTenancy"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcTenancyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcTenancyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcTenancyInput"} + if s.InstanceTenancy == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceTenancy")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcTenancyInput) SetDryRun(v bool) *ModifyVpcTenancyInput { + s.DryRun = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *ModifyVpcTenancyInput) SetInstanceTenancy(v string) *ModifyVpcTenancyInput { + s.InstanceTenancy = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ModifyVpcTenancyInput) SetVpcId(v string) *ModifyVpcTenancyInput { + s.VpcId = &v + return s +} + +// Contains the output of ModifyVpcTenancy. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyResult +type ModifyVpcTenancyOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcTenancyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. +func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput { + s.ReturnValue = &v + return s +} + // Contains the parameters for MonitorInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstancesRequest type MonitorInstancesInput struct { @@ -51311,9 +51513,6 @@ type RequestSpotLaunchSpecification struct { AddressingType *string `locationName:"addressingType" type:"string"` // One or more block device mapping entries. - // - // Although you can specify encrypted EBS volumes in this block device mapping - // for your Spot Instances, these volumes are not encrypted. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -53542,9 +53741,9 @@ type RunInstancesInput struct { // The user data to make available to the instance. For more information, see // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - // (Windows). If you are using an AWS SDK or command line tool, base64-encoding - // is performed for you, and you can load the text from a file. Otherwise, you - // must provide base64-encoded text. + // (Windows). If you are using a command line tool, base64-encoding is performed + // for you, and you can load the text from a file. Otherwise, you must provide + // base64-encoded text. UserData *string `type:"string"` } @@ -60726,6 +60925,15 @@ const ( // InstanceTypeP216xlarge is a InstanceType enum value InstanceTypeP216xlarge = "p2.16xlarge" + // InstanceTypeP32xlarge is a InstanceType enum value + InstanceTypeP32xlarge = "p3.2xlarge" + + // InstanceTypeP38xlarge is a InstanceType enum value + InstanceTypeP38xlarge = "p3.8xlarge" + + // InstanceTypeP316xlarge is a InstanceType enum value + InstanceTypeP316xlarge = "p3.16xlarge" + // InstanceTypeD2Xlarge is a InstanceType enum value InstanceTypeD2Xlarge = "d2.xlarge" @@ -61464,6 +61672,11 @@ const ( VpcStateAvailable = "available" ) +const ( + // VpcTenancyDefault is a VpcTenancy enum value + VpcTenancyDefault = "default" +) + const ( // VpnStatePending is a VpnState enum value VpnStatePending = "pending" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index f9136852fda..5938049e376 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -329,9 +329,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // CompleteLayerUpload API operation for Amazon EC2 Container Registry. 
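With ModifyVpcTenancyInput/Output and the VpcTenancyDefault enum value now defined, the call follows the SDK's usual pattern. A hedged sketch; the VPC ID is a placeholder and the client setup is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Per the docs above, a VPC's tenancy can only be changed *to* "default".
	out, err := svc.ModifyVpcTenancy(&ec2.ModifyVpcTenancyInput{
		VpcId:           aws.String("vpc-12345678"), // placeholder ID
		InstanceTenancy: aws.String(ec2.VpcTenancyDefault),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("succeeded:", aws.BoolValue(out.ReturnValue))
}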
 //
-// Inform Amazon ECR that the image layer upload for a specified registry, repository
-// name, and upload ID, has completed. You can optionally provide a sha256 digest
-// of the image layer for data validation purposes.
+// Informs Amazon ECR that the image layer upload has completed for a specified
+// registry, repository name, and upload ID. You can optionally provide a sha256
+// digest of the image layer for data validation purposes.
 //
 // This operation is used by the Amazon ECR proxy, and it is not intended for
 // general use by customers for pulling and pushing images. In most cases, you
@@ -487,6 +487,96 @@ func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateReposito
 	return out, req.Send()
 }
 
+const opDeleteLifecyclePolicy = "DeleteLifecyclePolicy"
+
+// DeleteLifecyclePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLifecyclePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLifecyclePolicy for more information on using the DeleteLifecyclePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DeleteLifecyclePolicyRequest method.
+//    req, resp := client.DeleteLifecyclePolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
+func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (req *request.Request, output *DeleteLifecyclePolicyOutput) {
+	op := &request.Operation{
+		Name:       opDeleteLifecyclePolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLifecyclePolicyInput{}
+	}
+
+	output = &DeleteLifecyclePolicyOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry.
+//
+// Deletes the specified lifecycle policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation DeleteLifecyclePolicy for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   These errors are usually caused by a server-side issue.
+//
+//   * ErrCodeInvalidParameterException "InvalidParameterException"
+//   The specified parameter is invalid. Review the available parameters for the
+//   API request.
+//
+//   * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+//   The specified repository could not be found. Check the spelling of the specified
+//   repository and ensure that you are performing operations on the correct registry.
+//
+//   * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException"
+//   The lifecycle policy could not be found, and no policy is set to the repository.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
+func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) {
+	req, out := c.DeleteLifecyclePolicyRequest(input)
+	return out, req.Send()
+}
+
+// DeleteLifecyclePolicyWithContext is the same as DeleteLifecyclePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLifecyclePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *DeleteLifecyclePolicyInput, opts ...request.Option) (*DeleteLifecyclePolicyOutput, error) {
+	req, out := c.DeleteLifecyclePolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opDeleteRepository = "DeleteRepository"
 
 // DeleteRepositoryRequest generates a "aws/request.Request" representing the
@@ -1155,6 +1245,186 @@ func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownl
 	return out, req.Send()
 }
 
+const opGetLifecyclePolicy = "GetLifecyclePolicy"
+
+// GetLifecyclePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetLifecyclePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetLifecyclePolicy for more information on using the GetLifecyclePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetLifecyclePolicyRequest method.
+//    req, resp := client.GetLifecyclePolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy
+func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *request.Request, output *GetLifecyclePolicyOutput) {
+	op := &request.Operation{
+		Name:       opGetLifecyclePolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetLifecyclePolicyInput{}
+	}
+
+	output = &GetLifecyclePolicyOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetLifecyclePolicy API operation for Amazon EC2 Container Registry.
+//
+// Retrieves the specified lifecycle policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation GetLifecyclePolicy for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeServerException "ServerException"
+//   These errors are usually caused by a server-side issue.
+//
+//   * ErrCodeInvalidParameterException "InvalidParameterException"
+//   The specified parameter is invalid. Review the available parameters for the
+//   API request.
+//
+//   * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+//   The specified repository could not be found. Check the spelling of the specified
+//   repository and ensure that you are performing operations on the correct registry.
+//
+//   * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException"
+//   The lifecycle policy could not be found, and no policy is set to the repository.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy
+func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) {
+	req, out := c.GetLifecyclePolicyRequest(input)
+	return out, req.Send()
+}
+
+// GetLifecyclePolicyWithContext is the same as GetLifecyclePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLifecyclePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecyclePolicyInput, opts ...request.Option) (*GetLifecyclePolicyOutput, error) {
+	req, out := c.GetLifecyclePolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetLifecyclePolicyPreview = "GetLifecyclePolicyPreview"
+
+// GetLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the
+// client's request for the GetLifecyclePolicyPreview operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetLifecyclePolicyPreview for more information on using the GetLifecyclePolicyPreview
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetLifecyclePolicyPreviewRequest method.
+//    req, resp := client.GetLifecyclePolicyPreviewRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview
+func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewInput) (req *request.Request, output *GetLifecyclePolicyPreviewOutput) {
+	op := &request.Operation{
+		Name:       opGetLifecyclePolicyPreview,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetLifecyclePolicyPreviewInput{}
+	}
+
+	output = &GetLifecyclePolicyPreviewOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry.
+//
+// Retrieves the results of the specified lifecycle policy preview request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
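The "Returns awserr.Error" paragraphs repeated through these operations translate to a runtime type assertion at the call site. A sketch of handling the error codes listed above for GetLifecyclePolicy; the repository name is a placeholder and the client setup is assumed:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	out, err := svc.GetLifecyclePolicy(&ecr.GetLifecyclePolicyInput{
		RepositoryName: aws.String("my-repo"), // placeholder
	})
	if err != nil {
		// Recover the service error code and message via a type assertion.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case ecr.ErrCodeLifecyclePolicyNotFoundException:
				log.Println("no lifecycle policy is set on the repository")
			case ecr.ErrCodeRepositoryNotFoundException:
				log.Println("check the repository name and registry")
			default:
				log.Println(aerr.Code(), aerr.Message())
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.LifecyclePolicyText))
}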
+// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation GetLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyPreviewNotFoundException "LifecyclePolicyPreviewNotFoundException" +// There is no dry run for this repository. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview +func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// GetLifecyclePolicyPreviewWithContext is the same as GetLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See GetLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.Option) (*GetLifecyclePolicyPreviewOutput, error) { + req, out := c.GetLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1556,8 +1826,8 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // repository and ensure that you are performing operations on the correct registry. // // * ErrCodeImageAlreadyExistsException "ImageAlreadyExistsException" -// The specified image has already been pushed, and there are no changes to -// the manifest or image tag since the last push. +// The specified image has already been pushed, and there were no changes to +// the manifest or image tag after the last push. // // * ErrCodeLayersNotFoundException "LayersNotFoundException" // The specified layers could not be found, or the specified layer is not valid @@ -1591,58 +1861,58 @@ func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts .. return out, req.Send() } -const opSetRepositoryPolicy = "SetRepositoryPolicy" +const opPutLifecyclePolicy = "PutLifecyclePolicy" -// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the -// client's request for the SetRepositoryPolicy operation. The "output" return +// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecyclePolicy operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy +// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SetRepositoryPolicyRequest method. -// req, resp := client.SetRepositoryPolicyRequest(params) +// // Example sending a request using the PutLifecyclePolicyRequest method. +// req, resp := client.PutLifecyclePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy -func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) { op := &request.Operation{ - Name: opSetRepositoryPolicy, + Name: opPutLifecyclePolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &SetRepositoryPolicyInput{} + input = &PutLifecyclePolicyInput{} } - output = &SetRepositoryPolicyOutput{} + output = &PutLifecyclePolicyOutput{} req = c.newRequest(op, input, output) return } -// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. +// PutLifecyclePolicy API operation for Amazon EC2 Container Registry. // -// Applies a repository policy on a specified repository to control access permissions. +// Creates or updates a lifecycle policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation SetRepositoryPolicy for usage and error information. +// API operation PutLifecyclePolicy for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -1656,84 +1926,80 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy -func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { - req, out := c.SetRepositoryPolicyRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy +func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) return out, req.Send() } -// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of +// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of // the ability to pass a context and additional request options. // -// See SetRepositoryPolicy for details on how to use this API operation. 
+// See PutLifecyclePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) { - req, out := c.SetRepositoryPolicyRequest(input) +func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) { + req, out := c.PutLifecyclePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUploadLayerPart = "UploadLayerPart" +const opSetRepositoryPolicy = "SetRepositoryPolicy" -// UploadLayerPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadLayerPart operation. The "output" return +// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetRepositoryPolicy operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UploadLayerPart for more information on using the UploadLayerPart +// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UploadLayerPartRequest method. -// req, resp := client.UploadLayerPartRequest(params) +// // Example sending a request using the SetRepositoryPolicyRequest method. +// req, resp := client.SetRepositoryPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart -func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy +func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { op := &request.Operation{ - Name: opUploadLayerPart, + Name: opSetRepositoryPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UploadLayerPartInput{} + input = &SetRepositoryPolicyInput{} } - output = &UploadLayerPartOutput{} + output = &SetRepositoryPolicyOutput{} req = c.newRequest(op, input, output) return } -// UploadLayerPart API operation for Amazon EC2 Container Registry. -// -// Uploads an image layer part to Amazon ECR. +// SetRepositoryPolicy API operation for Amazon EC2 Container Registry. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// Applies a repository policy on a specified repository to control access permissions. 
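Each *WithContext variant in this file panics on a nil context, so callers typically derive one with a deadline before issuing the request. A minimal sketch using GetLifecyclePolicyWithContext; the timeout, repository name, and client setup are assumptions:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// Bound the call; cancellation propagates into the request via SetContext.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.GetLifecyclePolicyWithContext(ctx, &ecr.GetLifecyclePolicyInput{
		RepositoryName: aws.String("my-repo"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.LifecyclePolicyText))
}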
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon EC2 Container Registry's
-// API operation UploadLayerPart for usage and error information.
+// API operation SetRepositoryPolicy for usage and error information.
 //
 // Returned Error Codes:
 //   * ErrCodeServerException "ServerException"
@@ -1743,8 +2009,194 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.
 //   The specified parameter is invalid. Review the available parameters for the
 //   API request.
 //
-//   * ErrCodeInvalidLayerPartException "InvalidLayerPartException"
-//   The layer part size is not valid, or the first byte specified is not consecutive
+//   * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+//   The specified repository could not be found. Check the spelling of the specified
+//   repository and ensure that you are performing operations on the correct registry.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy
+func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) {
+	req, out := c.SetRepositoryPolicyRequest(input)
+	return out, req.Send()
+}
+
+// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetRepositoryPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) {
+	req, out := c.SetRepositoryPolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview"
+
+// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the
+// client's request for the StartLifecyclePolicyPreview operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the StartLifecyclePolicyPreviewRequest method.
+// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of the specified lifecycle policy. This allows you to see +// the results before creating the lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" +// The previous lifecycle policy preview request has not completed. Please try +// again later. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadLayerPart = "UploadLayerPart" + +// UploadLayerPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadLayerPart operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadLayerPart for more information on using the UploadLayerPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadLayerPartRequest method. +// req, resp := client.UploadLayerPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart +func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { + op := &request.Operation{ + Name: opUploadLayerPart, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadLayerPartInput{} + } + + output = &UploadLayerPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadLayerPart API operation for Amazon EC2 Container Registry. +// +// Uploads an image layer part to Amazon ECR. +// +// This operation is used by the Amazon ECR proxy, and it is not intended for +// general use by customers for pulling and pushing images. In most cases, you +// should use the docker CLI to pull, tag, and push images. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation UploadLayerPart for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeInvalidLayerPartException "InvalidLayerPartException" +// The layer part size is not valid, or the first byte specified is not consecutive // to the last byte of a previous layer part upload. // // * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" @@ -2360,11 +2812,115 @@ func (s *CreateRepositoryOutput) SetRepository(v *Repository) *CreateRepositoryO return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicyRequest +type DeleteLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that is associated with the repository policy + // to
 delete. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLifecyclePolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyInput) SetRegistryId(v string) *DeleteLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyInput) SetRepositoryName(v string) *DeleteLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicyResponse +type DeleteLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The time stamp of the last time that the lifecycle policy was run. + LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp" timestampFormat:"unix"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DeleteLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *DeleteLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *DeleteLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *DeleteLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *DeleteLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DeleteLifecyclePolicyOutput) SetRegistryId(v string) *DeleteLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DeleteLifecyclePolicyOutput) SetRepositoryName(v string) *DeleteLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryRequest type DeleteRepositoryInput struct { _ struct{} `type:"structure"` - // Force the deletion of the repository if it contains images. + // If a repository contains images, forces the deletion. 
 	Force *bool `locationName:"force" type:"boolean"`
 
 	// The AWS account ID associated with the registry that contains the repository
@@ -2990,33 +3546,33 @@ func (s *GetDownloadUrlForLayerOutput) SetLayerDigest(v string) *GetDownloadUrlF
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyRequest
-type GetRepositoryPolicyInput struct {
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyRequest
+type GetLifecyclePolicyInput struct {
 	_ struct{} `type:"structure"`
 
 	// The AWS account ID associated with the registry that contains the repository.
 	// If you do not specify a registry, the default registry is assumed.
 	RegistryId *string `locationName:"registryId" type:"string"`
 
-	// The name of the repository whose policy you want to retrieve.
+	// The name of the repository with the policy to retrieve.
 	//
 	// RepositoryName is a required field
 	RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s GetRepositoryPolicyInput) String() string {
+func (s GetLifecyclePolicyInput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s GetRepositoryPolicyInput) GoString() string {
+func (s GetLifecyclePolicyInput) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetRepositoryPolicyInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"}
+func (s *GetLifecyclePolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyInput"}
 	if s.RepositoryName == nil {
 		invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
 	}
@@ -3031,23 +3587,26 @@ func (s *GetRepositoryPolicyInput) Validate() error {
 }
 
 // SetRegistryId sets the RegistryId field's value.
-func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput {
+func (s *GetLifecyclePolicyInput) SetRegistryId(v string) *GetLifecyclePolicyInput {
 	s.RegistryId = &v
 	return s
 }
 
 // SetRepositoryName sets the RepositoryName field's value.
-func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput {
+func (s *GetLifecyclePolicyInput) SetRepositoryName(v string) *GetLifecyclePolicyInput {
 	s.RepositoryName = &v
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyResponse
-type GetRepositoryPolicyOutput struct {
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyResponse
+type GetLifecyclePolicyOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The JSON repository policy text associated with the repository.
-	PolicyText *string `locationName:"policyText" type:"string"`
+	// The time stamp of the last time that the lifecycle policy was run.
+	LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp" timestampFormat:"unix"`
+
+	// The JSON lifecycle policy text.
+	LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
 
 	// The registry ID associated with the request.
RegistryId *string `locationName:"registryId" type:"string"` @@ -3057,16 +3616,303 @@ type GetRepositoryPolicyOutput struct { } // String returns the string representation -func (s GetRepositoryPolicyOutput) String() string { +func (s GetLifecyclePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRepositoryPolicyOutput) GoString() string { +func (s GetLifecyclePolicyOutput) GoString() string { return s.String() } -// SetPolicyText sets the PolicyText field's value. +// SetLastEvaluatedAt sets the LastEvaluatedAt field's value. +func (s *GetLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *GetLifecyclePolicyOutput { + s.LastEvaluatedAt = &v + return s +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyOutput) SetRegistryId(v string) *GetLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyOutput) SetRepositoryName(v string) *GetLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreviewRequest +type GetLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // An optional parameter that filters results based on image tag status and + // all tags, if tagged. + Filter *LifecyclePolicyPreviewFilter `locationName:"filter" type:"structure"` + + // The list of imageIDs to be included. + ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"` + + // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest + // in
paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest
+	// only returns maxResults results in a single page along with a nextToken
+	// response element. The remaining results of the initial request can be seen
+	// by sending another GetLifecyclePolicyPreviewRequest request with the returned
+	// nextToken value. This value can be between 1 and 100. If this parameter
+	// is not used, then GetLifecyclePolicyPreviewRequest returns up to 100 results
+	// and a nextToken value, if applicable.
+	MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+	// The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest
+	// request where maxResults was used and the results exceeded the value of
+	// that parameter. Pagination continues from the end of the previous results
+	// that returned the nextToken value. This value is null when there are no
+	// more results to return.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The AWS account ID associated with the registry that contains the repository.
+	// If you do not specify a registry, the default registry is assumed.
+	RegistryId *string `locationName:"registryId" type:"string"`
+
+	// The name of the repository with the policy to retrieve.
+	//
+	// RepositoryName is a required field
+	RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetLifecyclePolicyPreviewInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLifecyclePolicyPreviewInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetLifecyclePolicyPreviewInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyPreviewInput"}
+	if s.ImageIds != nil && len(s.ImageIds) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
+	}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+	if s.RepositoryName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+	}
+	if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetFilter(v *LifecyclePolicyPreviewFilter) *GetLifecyclePolicyPreviewInput {
+	s.Filter = v
+	return s
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetImageIds(v []*ImageIdentifier) *GetLifecyclePolicyPreviewInput {
+	s.ImageIds = v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetMaxResults(v int64) *GetLifecyclePolicyPreviewInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetNextToken(v string) *GetLifecyclePolicyPreviewInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetRegistryId(v string) *GetLifecyclePolicyPreviewInput {
+	s.RegistryId = &v
+	return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewInput {
+	s.RepositoryName = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreviewResponse
+type GetLifecyclePolicyPreviewOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The JSON lifecycle policy text.
+	LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+	// The nextToken value to include in a future GetLifecyclePolicyPreview request.
+	// When the results of a GetLifecyclePolicyPreview request exceed maxResults,
+	// this value can be used to retrieve the next page of results. This value is
+	// null when there are no more results to return.
+	NextToken *string `locationName:"nextToken" type:"string"`
+
+	// The results of the lifecycle policy preview request.
+ PreviewResults []*LifecyclePolicyPreviewResult `locationName:"previewResults" type:"list"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` + + // The list of images that is returned as a result of the action. + Summary *LifecyclePolicyPreviewSummary `locationName:"summary" type:"structure"` +} + +// String returns the string representation +func (s GetLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetNextToken(v string) *GetLifecyclePolicyPreviewOutput { + s.NextToken = &v + return s +} + +// SetPreviewResults sets the PreviewResults field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetPreviewResults(v []*LifecyclePolicyPreviewResult) *GetLifecyclePolicyPreviewOutput { + s.PreviewResults = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRegistryId(v string) *GetLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetStatus(v string) *GetLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + +// SetSummary sets the Summary field's value. +func (s *GetLifecyclePolicyPreviewOutput) SetSummary(v *LifecyclePolicyPreviewSummary) *GetLifecyclePolicyPreviewOutput { + s.Summary = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyRequest +type GetRepositoryPolicyInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository with the policy to retrieve. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRepositoryPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
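With GetLifecyclePolicyPreviewOutput fully defined, the operations above compose into a start-then-poll workflow: kick off a preview with StartLifecyclePolicyPreview, then poll GetLifecyclePolicyPreview until Status leaves the in-progress state. A sketch under those assumptions; the repository name, the "IN_PROGRESS" status literal, and the polling interval are illustrative, not taken from this diff:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	repo := aws.String("my-repo") // placeholder

	// Start a dry run of the repository's lifecycle policy.
	if _, err := svc.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
		RepositoryName: repo,
	}); err != nil {
		log.Fatal(err)
	}

	// Poll until the preview finishes, then report how many images would expire.
	for {
		out, err := svc.GetLifecyclePolicyPreview(&ecr.GetLifecyclePolicyPreviewInput{
			RepositoryName: repo,
		})
		if err != nil {
			log.Fatal(err)
		}
		if aws.StringValue(out.Status) != "IN_PROGRESS" { // assumed status literal
			if out.Summary != nil {
				fmt.Println("expiring images:", aws.Int64Value(out.Summary.ExpiringImageTotalCount))
			}
			break
		}
		time.Sleep(2 * time.Second)
	}
}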
+func (s *GetRepositoryPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"} + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegistryId sets the RegistryId field's value. +func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicyResponse +type GetRepositoryPolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text associated with the repository. + PolicyText *string `locationName:"policyText" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s GetRepositoryPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRepositoryPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyText sets the PolicyText field's value. func (s *GetRepositoryPolicyOutput) SetPolicyText(v string) *GetRepositoryPolicyOutput { s.PolicyText = &v return s @@ -3293,11 +4139,11 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { type InitiateLayerUploadInput struct { _ struct{} `type:"structure"` - // The AWS account ID associated with the registry that you intend to upload - // layers to. If you do not specify a registry, the default registry is assumed. + // The AWS account ID associated with the registry to which you intend to upload + // layers. If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that you intend to upload layers to. + // The name of the repository to which you intend to upload layers. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3472,6 +4318,143 @@ func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure { return s } +// The filter for the lifecycle policy preview. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewFilter +type LifecyclePolicyPreviewFilter struct { + _ struct{} `type:"structure"` + + // The tag status of the image. + TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewFilter) GoString() string { + return s.String() +} + +// SetTagStatus sets the TagStatus field's value. 
+func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPreviewFilter { + s.TagStatus = &v + return s +} + +// The result of the lifecycle policy preview. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewResult +type LifecyclePolicyPreviewResult struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. + Action *LifecyclePolicyRuleAction `locationName:"action" type:"structure"` + + // The priority of the applied rule. + AppliedRulePriority *int64 `locationName:"appliedRulePriority" min:"1" type:"integer"` + + // The sha256 digest of the image manifest. + ImageDigest *string `locationName:"imageDigest" type:"string"` + + // The date and time, expressed in standard JavaScript date format, at which + // the current image was pushed to the repository. + ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp" timestampFormat:"unix"` + + // The list of tags associated with this image. + ImageTags []*string `locationName:"imageTags" type:"list"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewResult) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *LifecyclePolicyPreviewResult) SetAction(v *LifecyclePolicyRuleAction) *LifecyclePolicyPreviewResult { + s.Action = v + return s +} + +// SetAppliedRulePriority sets the AppliedRulePriority field's value. +func (s *LifecyclePolicyPreviewResult) SetAppliedRulePriority(v int64) *LifecyclePolicyPreviewResult { + s.AppliedRulePriority = &v + return s +} + +// SetImageDigest sets the ImageDigest field's value. +func (s *LifecyclePolicyPreviewResult) SetImageDigest(v string) *LifecyclePolicyPreviewResult { + s.ImageDigest = &v + return s +} + +// SetImagePushedAt sets the ImagePushedAt field's value. +func (s *LifecyclePolicyPreviewResult) SetImagePushedAt(v time.Time) *LifecyclePolicyPreviewResult { + s.ImagePushedAt = &v + return s +} + +// SetImageTags sets the ImageTags field's value. +func (s *LifecyclePolicyPreviewResult) SetImageTags(v []*string) *LifecyclePolicyPreviewResult { + s.ImageTags = v + return s +} + +// The summary of the lifecycle policy preview request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyPreviewSummary +type LifecyclePolicyPreviewSummary struct { + _ struct{} `type:"structure"` + + // The number of expiring images. + ExpiringImageTotalCount *int64 `locationName:"expiringImageTotalCount" type:"integer"` +} + +// String returns the string representation +func (s LifecyclePolicyPreviewSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyPreviewSummary) GoString() string { + return s.String() +} + +// SetExpiringImageTotalCount sets the ExpiringImageTotalCount field's value. +func (s *LifecyclePolicyPreviewSummary) SetExpiringImageTotalCount(v int64) *LifecyclePolicyPreviewSummary { + s.ExpiringImageTotalCount = &v + return s +} + +// The type of action to be taken. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/LifecyclePolicyRuleAction +type LifecyclePolicyRuleAction struct { + _ struct{} `type:"structure"` + + // The type of action to be taken. 
+ Type *string `locationName:"type" type:"string" enum:"ImageActionType"` +} + +// String returns the string representation +func (s LifecyclePolicyRuleAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecyclePolicyRuleAction) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction { + s.Type = &v + return s +} + // An object representing a filter on a ListImages operation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImagesFilter type ListImagesFilter struct { @@ -3524,11 +4507,11 @@ type ListImagesInput struct { NextToken *string `locationName:"nextToken" type:"string"` // The AWS account ID associated with the registry that contains the repository - // to list images in. If you do not specify a registry, the default registry + // in which to list images. If you do not specify a registry, the default registry // is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The repository whose image IDs are to be listed. + // The repository with image IDs to be listed. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -3730,28 +4713,138 @@ func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicyRequest +type PutLifecyclePolicyInput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text to apply to the repository. + // + // LifecyclePolicyText is a required field + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do
 not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to receive the policy. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutLifecyclePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutLifecyclePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutLifecyclePolicyInput"} + if s.LifecyclePolicyText == nil { + invalidParams.Add(request.NewErrParamRequired("LifecyclePolicyText")) + } + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyInput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyInput) SetRegistryId(v string) *PutLifecyclePolicyInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyInput) SetRepositoryName(v string) *PutLifecyclePolicyInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicyResponse +type PutLifecyclePolicyOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutLifecyclePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutLifecyclePolicyOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *PutLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutLifecyclePolicyOutput) SetRegistryId(v string) *PutLifecyclePolicyOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePolicyOutput { + s.RepositoryName = &v + return s +} + // An object representing a repository. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/Repository type Repository struct { _ struct{} `type:"structure"` - // The date and time, in JavaScript date/time format, when the repository was - // created. + // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // The AWS account ID associated with the registry that contains the repository. RegistryId *string `locationName:"registryId" type:"string"` // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, the - // AWS account ID of the repository owner, the repository namespace, and then - // the repository name. For example, arn:aws:ecr:region:012345678910:repository/test. + // the arn:aws:ecr namespace, followed by the region of the repository, AWS + // account ID of the repository owner, repository namespace, and repository + // name. For example, arn:aws:ecr:region:012345678910:repository/test. RepositoryArn *string `locationName:"repositoryArn" type:"string"` // The name of the repository. RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` - // The URI for the repository. You can use this URI for Docker push and pull + // The URI for the repository. You can use this URI for Docker push or pull // operations. RepositoryUri *string `locationName:"repositoryUri" type:"string"` } @@ -3915,6 +5008,122 @@ func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPo return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreviewRequest +type StartLifecyclePolicyPreviewInput struct { + _ struct{} `type:"structure"` + + // The policy to be evaluated against. If you do not specify a policy, the current + // policy for the repository is used. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The AWS account ID associated with the registry that contains the repository. + // If you do not specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository to be evaluated. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartLifecyclePolicyPreviewInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartLifecyclePolicyPreviewInput"} + if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 { + invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. 
+func (s *StartLifecyclePolicyPreviewInput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewInput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRegistryId(v string) *StartLifecyclePolicyPreviewInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewInput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewInput { + s.RepositoryName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreviewResponse +type StartLifecyclePolicyPreviewOutput struct { + _ struct{} `type:"structure"` + + // The JSON repository policy text. + LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` + + // The status of the lifecycle policy preview request. + Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"` +} + +// String returns the string representation +func (s StartLifecyclePolicyPreviewOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartLifecyclePolicyPreviewOutput) GoString() string { + return s.String() +} + +// SetLifecyclePolicyText sets the LifecyclePolicyText field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewOutput { + s.LifecyclePolicyText = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRegistryId(v string) *StartLifecyclePolicyPreviewOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewOutput { + s.RepositoryName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *StartLifecyclePolicyPreviewOutput) SetStatus(v string) *StartLifecyclePolicyPreviewOutput { + s.Status = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPartRequest type UploadLayerPartInput struct { _ struct{} `type:"structure"` @@ -3936,11 +5145,11 @@ type UploadLayerPartInput struct { // PartLastByte is a required field PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` - // The AWS account ID associated with the registry that you are uploading layer - // parts to. If you do not specify a registry, the default registry is assumed. + // The AWS account ID associated with the registry to which you are uploading + // layer parts. If you do not specify a registry, the default registry is assumed. RegistryId *string `locationName:"registryId" type:"string"` - // The name of the repository that you are uploading layer parts to. + // The name of the repository to which you are uploading layer parts. 
// // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` @@ -4077,6 +5286,11 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } +const ( + // ImageActionTypeExpire is a ImageActionType enum value + ImageActionTypeExpire = "EXPIRE" +) + const ( // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" @@ -4110,6 +5324,20 @@ const ( LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" ) +const ( + // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" + + // LifecyclePolicyPreviewStatusComplete is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusComplete = "COMPLETE" + + // LifecyclePolicyPreviewStatusExpired is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusExpired = "EXPIRED" + + // LifecyclePolicyPreviewStatusFailed is a LifecyclePolicyPreviewStatus enum value + LifecyclePolicyPreviewStatusFailed = "FAILED" +) + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go index 987aa72fa68..c2dc9e8a87d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -3,11 +3,11 @@ // Package ecr provides the client and types for making API // requests to Amazon EC2 Container Registry. // -// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry -// service. Customers can use the familiar Docker CLI to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon -// ECR supports private Docker repositories with resource-based permissions -// using AWS IAM so that specific users or Amazon EC2 instances can access repositories +// Amazon EC2 Container Registry (Amazon ECR) is a managed Docker registry service. +// Customers can use the familiar Docker CLI to push, pull, and manage images. +// Amazon ECR provides a secure, scalable, and reliable registry. Amazon ECR +// supports private Docker repositories with resource-based permissions using +// IAM so that specific users or Amazon EC2 instances can access repositories // and images. Developers can use the Docker CLI to author and manage images. // // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index 4399e6a2957..2a2caaeddc4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -13,8 +13,8 @@ const ( // ErrCodeImageAlreadyExistsException for service response error code // "ImageAlreadyExistsException". // - // The specified image has already been pushed, and there are no changes to - // the manifest or image tag since the last push. + // The specified image has already been pushed, and there were no changes to + // the manifest or image tag after the last push. ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" // ErrCodeImageNotFoundException for service response error code @@ -70,6 +70,25 @@ const ( // for this repository. 
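[Editor's note: the ImageActionType and LifecyclePolicyPreviewStatus constants above are what a caller polls against when exercising the new lifecycle policy operations in this diff. A sketch tying them together, assuming a caller-supplied repository name and policy JSON (which must satisfy the min:"100" length validation shown earlier); the helper name is hypothetical, not part of the SDK:]

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

// previewLifecyclePolicy applies policyText to repo, starts a dry run, and
// polls using the LifecyclePolicyPreviewStatus constants defined above.
func previewLifecyclePolicy(svc *ecr.ECR, repo, policyText string) (*ecr.GetLifecyclePolicyPreviewOutput, error) {
	// PutLifecyclePolicy validates len(policyText) >= 100 client-side.
	if _, err := svc.PutLifecyclePolicy(&ecr.PutLifecyclePolicyInput{
		RepositoryName:      aws.String(repo),
		LifecyclePolicyText: aws.String(policyText),
	}); err != nil {
		return nil, err
	}
	// Omitting LifecyclePolicyText previews the policy already attached
	// to the repository.
	if _, err := svc.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
		RepositoryName: aws.String(repo),
	}); err != nil {
		return nil, err
	}
	for {
		out, err := svc.GetLifecyclePolicyPreview(&ecr.GetLifecyclePolicyPreviewInput{
			RepositoryName: aws.String(repo),
		})
		if err != nil {
			return nil, err
		}
		switch aws.StringValue(out.Status) {
		case ecr.LifecyclePolicyPreviewStatusInProgress:
			time.Sleep(2 * time.Second) // still evaluating; poll again
		case ecr.LifecyclePolicyPreviewStatusComplete:
			return out, nil // out.Summary carries ExpiringImageTotalCount
		default: // EXPIRED or FAILED
			return out, fmt.Errorf("preview ended with status %s", aws.StringValue(out.Status))
		}
	}
}
```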
 	ErrCodeLayersNotFoundException = "LayersNotFoundException"
 
+	// ErrCodeLifecyclePolicyNotFoundException for service response error code
+	// "LifecyclePolicyNotFoundException".
+	//
+	// The lifecycle policy could not be found, and no policy is set for the repository.
+	ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException"
+
+	// ErrCodeLifecyclePolicyPreviewInProgressException for service response error code
+	// "LifecyclePolicyPreviewInProgressException".
+	//
+	// The previous lifecycle policy preview request has not completed. Please try
+	// again later.
+	ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException"
+
+	// ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code
+	// "LifecyclePolicyPreviewNotFoundException".
+	//
+	// There is no dry run for this repository.
+	ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException"
+
 	// ErrCodeLimitExceededException for service response error code
 	// "LimitExceededException".
 	//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go
index 022c899a797..926a76bbb24 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go
@@ -4143,6 +4143,14 @@ func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput)
 //
 // When the reboot is complete, a cache cluster event is created.
 //
+// Rebooting a cluster is currently supported on Memcached and Redis (cluster
+// mode disabled) clusters. Rebooting is not supported on Redis (cluster mode
+// enabled) clusters.
+//
+// If you make changes to parameters that require a Redis (cluster mode enabled)
+// cluster reboot for the changes to be applied, see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.htm)
+// for an alternate process.
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -4789,6 +4797,20 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone {
 type CacheCluster struct {
 	_ struct{} `type:"structure"`
 
+	// A flag that enables encryption at rest when set to true.
+	//
+	// You cannot modify the value of AtRestEncryptionEnabled after the cluster
+	// is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled
+	// to true when you create a cluster.
+	//
+	// Default: false
+	AtRestEncryptionEnabled *bool `type:"boolean"`
+
+	// A flag that enables using an AuthToken (password) when issuing Redis commands.
+	//
+	// Default: false
+	AuthTokenEnabled *bool `type:"boolean"`
+
 	// This parameter is currently disabled.
 	AutoMinorVersionUpgrade *bool `type:"boolean"`
 
@@ -4806,38 +4828,61 @@ type CacheCluster struct {
 	// The name of the compute and memory capacity node type for the cache cluster.
 	//
-	// Valid node types are as follows:
+	// The following node types are supported by ElastiCache. Generally speaking,
+	// the current generation types provide more memory and computational power
+	// at lower cost when compared to their equivalent previous generation counterparts.
// // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: + // + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium + // + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // Previous generation: (not recommended) // - // * Compute optimized: cache.c1.xlarge + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. + // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -4937,6 +4982,15 @@ type CacheCluster struct { // // Example: 05:00-09:00 SnapshotWindow *string `type:"string"` + + // A flag that enables in-transit encryption when set to true. + // + // You cannot modify the value of TransitEncryptionEnabled after the cluster + // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled + // to true when you create a cluster. + // + // Default: false + TransitEncryptionEnabled *bool `type:"boolean"` } // String returns the string representation @@ -4949,6 +5003,18 @@ func (s CacheCluster) GoString() string { return s.String() } +// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. +func (s *CacheCluster) SetAtRestEncryptionEnabled(v bool) *CacheCluster { + s.AtRestEncryptionEnabled = &v + return s +} + +// SetAuthTokenEnabled sets the AuthTokenEnabled field's value. 
+func (s *CacheCluster) SetAuthTokenEnabled(v bool) *CacheCluster { + s.AuthTokenEnabled = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *CacheCluster) SetAutoMinorVersionUpgrade(v bool) *CacheCluster { s.AutoMinorVersionUpgrade = &v @@ -5081,6 +5147,12 @@ func (s *CacheCluster) SetSnapshotWindow(v string) *CacheCluster { return s } +// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. +func (s *CacheCluster) SetTransitEncryptionEnabled(v bool) *CacheCluster { + s.TransitEncryptionEnabled = &v + return s +} + // Provides all of the details about a particular cache engine version. // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheEngineVersion type CacheEngineVersion struct { @@ -5148,38 +5220,61 @@ func (s *CacheEngineVersion) SetEngineVersion(v string) *CacheEngineVersion { // runs its own instance of the cluster's protocol-compliant caching software // - either Memcached or Redis. // -// Valid node types are as follows: +// The following node types are supported by ElastiCache. Generally speaking, +// the current generation types provide more memory and computational power +// at lower cost when compared to their equivalent previous generation counterparts. // // * General purpose: // -// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, -// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, -// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge +// Current generation: +// +// T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium // -// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, -// cache.m1.xlarge +// M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // -// * Compute optimized: cache.c1.xlarge +// M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, +// cache.m4.10xlarge +// +// Previous generation: (not recommended) +// +// T1 node types:cache.t1.micro +// +// M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge +// +// * Compute optimized: +// +// Previous generation: (not recommended) +// +// C1 node types:cache.c1.xlarge // // * Memory optimized: // -// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, +// Current generation: +// +// R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // -// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// Previous generation: (not recommended) +// +// M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // -// * Redis backup/restore is not supported for Redis (cluster mode disabled) -// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode -// enabled) T2 instances. +// * Redis (cluster mode disabled): Redis backup/restore is not supported +// on T1 and T2 instances. +// +// * Redis (cluster mode enabled): Backup/restore is not supported on T1 +// instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // +// Supported node types are available in all regions except as noted in the +// following table. 
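[Editor's note: CacheCluster now carries three read-only flags, AtRestEncryptionEnabled, AuthTokenEnabled, and TransitEncryptionEnabled, whose setters appear above. A short sketch of surfacing them via DescribeCacheClusters; the cluster ID is a placeholder, and the helper name is hypothetical:]

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// printEncryptionFlags reports the new encryption-related flags for one
// cluster. The fields can be nil for clusters that predate them, so the
// aws.BoolValue default of false is the safe read.
func printEncryptionFlags(svc *elasticache.ElastiCache, clusterID string) error {
	out, err := svc.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
		CacheClusterId: aws.String(clusterID),
	})
	if err != nil {
		return err
	}
	for _, cc := range out.CacheClusters {
		fmt.Printf("%s: at-rest=%t in-transit=%t auth-token=%t\n",
			aws.StringValue(cc.CacheClusterId),
			aws.BoolValue(cc.AtRestEncryptionEnabled),
			aws.BoolValue(cc.TransitEncryptionEnabled),
			aws.BoolValue(cc.AuthTokenEnabled))
	}
	return nil
}
```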
+// // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -5797,10 +5892,10 @@ type CreateCacheClusterInput struct { // // * Must be at least 16 characters and no more than 128 characters in length. // - // * Cannot contain any of the following characters: '/', '"', or "@". + // * Cannot contain any of the following characters: '/', '"', or '@'. // // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // Redis. + // http://redis.io/commands/AUTH. AuthToken *string `type:"string"` // This parameter is currently disabled. @@ -5822,38 +5917,61 @@ type CreateCacheClusterInput struct { // The compute and memory capacity of the nodes in the node group (shard). // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. // // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: + // + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * Compute optimized: cache.c1.xlarge + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge + // + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. 
+ // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -6027,11 +6145,10 @@ type CreateCacheClusterInput struct { // If you do not specify this parameter, ElastiCache automatically chooses an // appropriate time range. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. SnapshotWindow *string `type:"string"` - // A list of cost allocation tags to be added to this resource. A tag is a key-value - // pair. A tag key must be accompanied by a tag value. + // A list of cost allocation tags to be added to this resource. Tags []*Tag `locationNameList:"Tag" type:"list"` } @@ -6510,18 +6627,38 @@ func (s *CreateCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) type CreateReplicationGroupInput struct { _ struct{} `type:"structure"` + // A flag that enables encryption at rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the replication + // group is created. To enable encryption at rest on a replication group you + // must set AtRestEncryptionEnabled to true when you create the replication + // group. + // + // This parameter is valid only if the Engine parameter is redis and the cluster + // is being created in an Amazon VPC. + // + // Default: false + AtRestEncryptionEnabled *bool `type:"boolean"` + // Reserved parameter. The password used to access a password protected server. // + // This parameter is valid only if: + // + // * The parameter TransitEncryptionEnabled was set to true when the cluster + // was created. + // + // * The line requirepass was added to the database configuration file. + // // Password constraints: // // * Must be only printable ASCII characters. // // * Must be at least 16 characters and no more than 128 characters in length. // - // * Cannot contain any of the following characters: '/', '"', or "@". + // * Cannot contain any of the following characters: '/', '"', or '@'. // // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // Redis. + // http://redis.io/commands/AUTH. AuthToken *string `type:"string"` // This parameter is currently disabled. @@ -6538,49 +6675,73 @@ type CreateReplicationGroupInput struct { // // Default: false // - // ElastiCache Multi-AZ replication groups is not supported on: + // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover + // on: // - // Redis versions earlier than 2.8.6. + // * Redis versions earlier than 2.8.6. // - // Redis (cluster mode disabled): T1 and T2 node types. + // * Redis (cluster mode disabled): T1 and T2 cache node types. // - // Redis (cluster mode enabled): T2 node types. + // * Redis (cluster mode enabled): T1 node types. AutomaticFailoverEnabled *bool `type:"boolean"` // The compute and memory capacity of the nodes in the node group (shard). // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
// // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium // - // * Compute optimized: cache.c1.xlarge + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge + // + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. + // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -6753,24 +6914,18 @@ type CreateReplicationGroupInput struct { // of node groups configured by NodeGroupConfiguration regardless of the number // of ARNs specified here. // - // This parameter is only valid if the Engine parameter is redis. - // // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` // The name of a snapshot from which to restore data into the new replication // group. The snapshot status changes to restoring while the new replication // group is being created. - // - // This parameter is only valid if the Engine parameter is redis. SnapshotName *string `type:"string"` // The number of days for which ElastiCache retains automatic snapshots before // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot // that was taken today is retained for 5 days before being deleted. // - // This parameter is only valid if the Engine parameter is redis. 
- // // Default: 0 (i.e., automatic backups are disabled for this cache cluster). SnapshotRetentionLimit *int64 `type:"integer"` @@ -6781,13 +6936,26 @@ type CreateReplicationGroupInput struct { // // If you do not specify this parameter, ElastiCache automatically chooses an // appropriate time range. - // - // This parameter is only valid if the Engine parameter is redis. SnapshotWindow *string `type:"string"` // A list of cost allocation tags to be added to this resource. A tag is a key-value - // pair. A tag key must be accompanied by a tag value. + // pair. Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A flag that enables in-transit encryption when set to true. + // + // You cannot modify the value of TransitEncryptionEnabled after the cluster + // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled + // to true when you create a cluster. + // + // This parameter is valid only if the Engine parameter is redis, the EngineVersion + // parameter is 3.2.4 or later, and the cluster is being created in an Amazon + // VPC. + // + // If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. + // + // Default: false + TransitEncryptionEnabled *bool `type:"boolean"` } // String returns the string representation @@ -6816,6 +6984,12 @@ func (s *CreateReplicationGroupInput) Validate() error { return nil } +// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. +func (s *CreateReplicationGroupInput) SetAtRestEncryptionEnabled(v bool) *CreateReplicationGroupInput { + s.AtRestEncryptionEnabled = &v + return s +} + // SetAuthToken sets the AuthToken field's value. func (s *CreateReplicationGroupInput) SetAuthToken(v string) *CreateReplicationGroupInput { s.AuthToken = &v @@ -6972,6 +7146,12 @@ func (s *CreateReplicationGroupInput) SetTags(v []*Tag) *CreateReplicationGroupI return s } +// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. +func (s *CreateReplicationGroupInput) SetTransitEncryptionEnabled(v bool) *CreateReplicationGroupInput { + s.TransitEncryptionEnabled = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateReplicationGroupResult type CreateReplicationGroupOutput struct { _ struct{} `type:"structure"` @@ -8415,38 +8595,61 @@ type DescribeReservedCacheNodesInput struct { // The cache node type filter value. Use this parameter to show only those reservations // matching the specified cache node type. // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
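[Editor's note: the TransitEncryptionEnabled documentation above pins down the preconditions for the new CreateReplicationGroup flags: Engine redis, EngineVersion 3.2.4 or later, creation inside a VPC, and a CacheSubnetGroup whenever in-transit encryption is on. A sketch of a create call satisfying them; every identifier and the token value are placeholders:]

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// createEncryptedGroup creates a Redis replication group with both new
// encryption flags enabled.
func createEncryptedGroup(svc *elasticache.ElastiCache) (*elasticache.CreateReplicationGroupOutput, error) {
	return svc.CreateReplicationGroup(&elasticache.CreateReplicationGroupInput{
		ReplicationGroupId:          aws.String("example-group"),
		ReplicationGroupDescription: aws.String("replication group with encryption enabled"),
		Engine:                      aws.String("redis"),
		EngineVersion:               aws.String("3.2.6"), // must be 3.2.4 or later
		CacheNodeType:               aws.String("cache.m4.large"),
		NumCacheClusters:            aws.Int64(2),
		// Required whenever TransitEncryptionEnabled is true; the subnet
		// group must place the cluster in a VPC.
		CacheSubnetGroupName:     aws.String("example-subnet-group"),
		AtRestEncryptionEnabled:  aws.Bool(true),
		TransitEncryptionEnabled: aws.Bool(true),
		// Only honored with in-transit encryption: 16-128 printable ASCII
		// characters, excluding '/', '"', and '@'.
		AuthToken: aws.String("correct-horse-battery-staple"),
	})
}
```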
// // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: + // + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium + // + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge // - // * Compute optimized: cache.c1.xlarge + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. + // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -8558,38 +8761,61 @@ type DescribeReservedCacheNodesOfferingsInput struct { // The cache node type filter value. Use this parameter to show only the available // offerings matching the specified cache node type. // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
// // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: + // + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium + // + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge // - // * Compute optimized: cache.c1.xlarge + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. + // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -9250,8 +9476,8 @@ type ModifyCacheClusterInput struct { // and the value of NumCacheNodes in the request. // // For example: If you have 3 active cache nodes, 7 pending cache nodes, and - // the number of cache nodes in this ModifyCacheCluser call is 5, you must list - // 2 (7 - 5) cache node IDs to remove. + // the number of cache nodes in this ModifyCacheCluster call is 5, you must + // list 2 (7 - 5) cache node IDs to remove. CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"` // A valid cache node type that you want to scale this cache cluster up to. @@ -9763,13 +9989,14 @@ type ModifyReplicationGroupInput struct { // // Valid values: true | false // - // ElastiCache Multi-AZ replication groups are not supported on: + // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover + // on: // - // Redis versions earlier than 2.8.6. + // * Redis versions earlier than 2.8.6. // - // Redis (cluster mode disabled):T1 and T2 cache node types. 
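[Editor's note: the corrected ModifyCacheCluster doc in the hunk above spells out the removal arithmetic in prose; the same scenario as a call, with placeholder IDs. With 7 cache nodes pending (3 active plus 4 being added) and NumCacheNodes set to 5, exactly 7 - 5 = 2 node IDs must be listed:]

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// scaleDownToFive mirrors the doc example: scaling a cluster with 7 pending
// nodes down to 5 requires naming the two nodes to drop.
func scaleDownToFive(svc *elasticache.ElastiCache) (*elasticache.ModifyCacheClusterOutput, error) {
	return svc.ModifyCacheCluster(&elasticache.ModifyCacheClusterInput{
		CacheClusterId:       aws.String("example-cluster"),
		NumCacheNodes:        aws.Int64(5),
		CacheNodeIdsToRemove: []*string{aws.String("0006"), aws.String("0007")},
		ApplyImmediately:     aws.Bool(true),
	})
}
```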
+ // * Redis (cluster mode disabled): T1 and T2 cache node types. // - // Redis (cluster mode enabled): T1 node types. + // * Redis (cluster mode enabled): T1 node types. AutomaticFailoverEnabled *bool `type:"boolean"` // A valid cache node type that you want to scale this replication group to. @@ -10811,15 +11038,31 @@ func (s *RemoveTagsFromResourceInput) SetTagKeys(v []*string) *RemoveTagsFromRes type ReplicationGroup struct { _ struct{} `type:"structure"` - // Indicates the status of Multi-AZ for this replication group. + // A flag that enables encryption at-rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the cluster + // is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled + // to true when you create a cluster. + // + // Default: false + AtRestEncryptionEnabled *bool `type:"boolean"` + + // A flag that enables using an AuthToken (password) when issuing Redis commands. + // + // Default: false + AuthTokenEnabled *bool `type:"boolean"` + + // Indicates the status of Multi-AZ with automatic failover for this Redis replication + // group. // - // ElastiCache Multi-AZ replication groups are not supported on: + // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover + // on: // - // Redis versions earlier than 2.8.6. + // * Redis versions earlier than 2.8.6. // - // Redis (cluster mode disabled):T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 and T2 cache node types. // - // Redis (cluster mode enabled): T1 node types. + // * Redis (cluster mode enabled): T1 node types. AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` // The name of the compute and memory capacity node type for each node in the @@ -10871,7 +11114,7 @@ type ReplicationGroup struct { // If you do not specify this parameter, ElastiCache automatically chooses an // appropriate time range. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. SnapshotWindow *string `type:"string"` // The cache cluster ID that is used as the daily snapshot source for the replication @@ -10881,6 +11124,15 @@ type ReplicationGroup struct { // The current state of this replication group - creating, available, modifying, // deleting, create-failed, snapshotting. Status *string `type:"string"` + + // A flag that enables in-transit encryption when set to true. + // + // You cannot modify the value of TransitEncryptionEnabled after the cluster + // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled + // to true when you create a cluster. + // + // Default: false + TransitEncryptionEnabled *bool `type:"boolean"` } // String returns the string representation @@ -10893,6 +11145,18 @@ func (s ReplicationGroup) GoString() string { return s.String() } +// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. +func (s *ReplicationGroup) SetAtRestEncryptionEnabled(v bool) *ReplicationGroup { + s.AtRestEncryptionEnabled = &v + return s +} + +// SetAuthTokenEnabled sets the AuthTokenEnabled field's value. +func (s *ReplicationGroup) SetAuthTokenEnabled(v bool) *ReplicationGroup { + s.AuthTokenEnabled = &v + return s +} + // SetAutomaticFailover sets the AutomaticFailover field's value. 
func (s *ReplicationGroup) SetAutomaticFailover(v string) *ReplicationGroup { s.AutomaticFailover = &v @@ -10971,21 +11235,29 @@ func (s *ReplicationGroup) SetStatus(v string) *ReplicationGroup { return s } +// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. +func (s *ReplicationGroup) SetTransitEncryptionEnabled(v bool) *ReplicationGroup { + s.TransitEncryptionEnabled = &v + return s +} + // The settings to be applied to the Redis replication group, either immediately // or during the next maintenance window. // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReplicationGroupPendingModifiedValues type ReplicationGroupPendingModifiedValues struct { _ struct{} `type:"structure"` - // Indicates the status of Multi-AZ for this Redis replication group. + // Indicates the status of Multi-AZ with automatic failover for this Redis replication + // group. // - // ElastiCache Multi-AZ replication groups are not supported on: + // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover + // on: // - // Redis versions earlier than 2.8.6. + // * Redis versions earlier than 2.8.6. // - // Redis (cluster mode disabled):T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 and T2 cache node types. // - // Redis (cluster mode enabled): T1 node types. + // * Redis (cluster mode enabled): T1 node types. AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` // The primary cluster ID that is applied immediately (if --apply-immediately @@ -11025,38 +11297,61 @@ type ReservedCacheNode struct { // The cache node type for the reserved cache nodes. // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. // // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: + // + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * Compute optimized: cache.c1.xlarge + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge + // + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). 
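[Editor's note: ReplicationGroup now exposes the same encryption flags plus the clarified AutomaticFailover status. A sketch reading them back once the group is available, reusing the placeholder group ID from the create sketch earlier; the helper name is hypothetical:]

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// checkGroupFlags reads back the flags set at creation time.
func checkGroupFlags(svc *elasticache.ElastiCache) error {
	out, err := svc.DescribeReplicationGroups(&elasticache.DescribeReplicationGroupsInput{
		ReplicationGroupId: aws.String("example-group"),
	})
	if err != nil {
		return err
	}
	for _, rg := range out.ReplicationGroups {
		fmt.Printf("%s: failover=%s in-transit=%t at-rest=%t auth-token=%t\n",
			aws.StringValue(rg.ReplicationGroupId),
			aws.StringValue(rg.AutomaticFailover),
			aws.BoolValue(rg.TransitEncryptionEnabled),
			aws.BoolValue(rg.AtRestEncryptionEnabled),
			aws.BoolValue(rg.AuthTokenEnabled))
	}
	return nil
}
```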
// - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. + // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -11183,38 +11478,61 @@ type ReservedCacheNodesOffering struct { // The cache node type for the reserved cache node. // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. // // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: + // + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * Compute optimized: cache.c1.xlarge + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge + // + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. + // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. 
+ // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) @@ -11510,15 +11828,17 @@ type Snapshot struct { // This parameter is currently disabled. AutoMinorVersionUpgrade *bool `type:"boolean"` - // Indicates the status of Multi-AZ for the source replication group. + // Indicates the status of Multi-AZ with automatic failover for the source Redis + // replication group. // - // ElastiCache Multi-AZ replication groups are not supported on: + // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover + // on: // - // Redis versions earlier than 2.8.6. + // * Redis versions earlier than 2.8.6. // - // Redis (cluster mode disabled):T1 and T2 cache node types. + // * Redis (cluster mode disabled): T1 and T2 cache node types. // - // Redis (cluster mode enabled): T1 node types. + // * Redis (cluster mode enabled): T1 node types. AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` // The date and time when the source cache cluster was created. @@ -11530,38 +11850,61 @@ type Snapshot struct { // The name of the compute and memory capacity node type for the source cache // cluster. // - // Valid node types are as follows: + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. // // * General purpose: // - // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge, - // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge + // Current generation: // - // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge + // T2 node types:cache.t2.micro, cache.t2.small, cache.t2.medium // - // * Compute optimized: cache.c1.xlarge + // M3 node types:cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // M4 node types:cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // cache.m4.10xlarge + // + // Previous generation: (not recommended) + // + // T1 node types:cache.t1.micro + // + // M1 node types:cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // + // * Compute optimized: + // + // Previous generation: (not recommended) + // + // C1 node types:cache.c1.xlarge // // * Memory optimized: // - // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // Current generation: + // + // R3 node types:cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, // cache.r3.8xlarge // - // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // Previous generation: (not recommended) + // + // M2 node types:cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge // // Notes: // // * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon // VPC). // - // * Redis backup/restore is not supported for Redis (cluster mode disabled) - // T1 and T2 instances. Backup/restore is supported on Redis (cluster mode - // enabled) T2 instances. 
+ // * Redis (cluster mode disabled): Redis backup/restore is not supported + // on T1 and T2 instances. + // + // * Redis (cluster mode enabled): Backup/restore is not supported on T1 + // instances. // // * Redis Append-only files (AOF) functionality is not supported for T1 // or T2 instances. // + // Supported node types are available in all regions except as noted in the + // following table. + // // For a complete listing of node types and specifications, see Amazon ElastiCache // Product Features and Details (http://aws.amazon.com/elasticache/details) // and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go index 794d3e47914..565f5bec963 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go @@ -2518,6 +2518,96 @@ func (c *ElasticBeanstalk) ListPlatformVersionsWithContext(ctx aws.Context, inpu return out, req.Send() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ListTagsForResource +func (c *ElasticBeanstalk) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Elastic Beanstalk. +// +// Returns the tags applied to an AWS Elastic Beanstalk resource. The response +// contains a list of tag key-value pairs. +// +// Currently, Elastic Beanstalk only supports tagging Elastic Beanstalk environments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elastic Beanstalk's +// API operation ListTagsForResource for usage and error information. 
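//
// A minimal sketch, not part of the vendored SDK, of calling the new
// ListTagsForResource operation; the environment ARN is hypothetical, and the
// error code checked is one of those documented below (imports: aws,
// aws/awserr, aws/session, service/elasticbeanstalk, fmt).
//
//    svc := elasticbeanstalk.New(session.Must(session.NewSession()))
//    out, err := svc.ListTagsForResource(&elasticbeanstalk.ListTagsForResourceInput{
//        ResourceArn: aws.String("arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/my-app/my-env"),
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == elasticbeanstalk.ErrCodeResourceNotFoundException {
//        fmt.Println("no environment with that ARN")
//    } else if err == nil {
//        for _, t := range out.ResourceTags {
//            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
//        }
//    }
//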
+// +// Returned Error Codes: +// * ErrCodeInsufficientPrivilegesException "InsufficientPrivilegesException" +// The specified account does not have sufficient privileges for one of more +// AWS services. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource doesn't exist for the specified Amazon Resource Name (ARN). +// +// * ErrCodeResourceTypeNotSupportedException "ResourceTypeNotSupportedException" +// The type of the specified Amazon Resource Name (ARN) isn't supported for +// this operation. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ListTagsForResource +func (c *ElasticBeanstalk) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRebuildEnvironment = "RebuildEnvironment" // RebuildEnvironmentRequest generates a "aws/request.Request" representing the @@ -3420,6 +3510,109 @@ func (c *ElasticBeanstalk) UpdateEnvironmentWithContext(ctx aws.Context, input * return out, req.Send() } +const opUpdateTagsForResource = "UpdateTagsForResource" + +// UpdateTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTagsForResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTagsForResource for more information on using the UpdateTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTagsForResourceRequest method. 
+// req, resp := client.UpdateTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/UpdateTagsForResource +func (c *ElasticBeanstalk) UpdateTagsForResourceRequest(input *UpdateTagsForResourceInput) (req *request.Request, output *UpdateTagsForResourceOutput) { + op := &request.Operation{ + Name: opUpdateTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTagsForResourceInput{} + } + + output = &UpdateTagsForResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateTagsForResource API operation for AWS Elastic Beanstalk. +// +// Update the list of tags applied to an AWS Elastic Beanstalk resource. Two +// lists can be passed: TagsToAdd for tags to add or update, and TagsToRemove. +// +// Currently, Elastic Beanstalk only supports tagging of Elastic Beanstalk environments. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elastic Beanstalk's +// API operation UpdateTagsForResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInsufficientPrivilegesException "InsufficientPrivilegesException" +// The specified account does not have sufficient privileges for one of more +// AWS services. +// +// * ErrCodeOperationInProgressException "OperationInProgressFailure" +// Unable to perform the specified operation because another operation that +// effects an element in this activity is already in progress. +// +// * ErrCodeTooManyTagsException "TooManyTagsException" +// The number of tags in the resource would exceed the number of tags that each +// resource can have. +// +// To calculate this, the operation considers both the number of tags the resource +// already has and the tags this operation would add if it succeeded. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// A resource doesn't exist for the specified Amazon Resource Name (ARN). +// +// * ErrCodeResourceTypeNotSupportedException "ResourceTypeNotSupportedException" +// The type of the specified Amazon Resource Name (ARN) isn't supported for +// this operation. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/UpdateTagsForResource +func (c *ElasticBeanstalk) UpdateTagsForResource(input *UpdateTagsForResourceInput) (*UpdateTagsForResourceOutput, error) { + req, out := c.UpdateTagsForResourceRequest(input) + return out, req.Send() +} + +// UpdateTagsForResourceWithContext is the same as UpdateTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
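//
// A minimal sketch, not part of the vendored SDK, pairing TagsToAdd and
// TagsToRemove in one UpdateTagsForResource call; the ARN, tag key, and tag
// value are hypothetical, and "svc" is an *ElasticBeanstalk client as in the
// previous sketch.
//
//    _, err := svc.UpdateTagsForResource(&elasticbeanstalk.UpdateTagsForResourceInput{
//        ResourceArn: aws.String("arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/my-app/my-env"),
//        TagsToAdd: []*elasticbeanstalk.Tag{
//            {Key: aws.String("team"), Value: aws.String("platform")}, // added, or updated if the key exists
//        },
//        TagsToRemove: []*string{aws.String("deprecated")}, // silently ignored if the key is absent
//    })
//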
+func (c *ElasticBeanstalk) UpdateTagsForResourceWithContext(ctx aws.Context, input *UpdateTagsForResourceInput, opts ...request.Option) (*UpdateTagsForResourceOutput, error) { + req, out := c.UpdateTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opValidateConfigurationSettings = "ValidateConfigurationSettings" // ValidateConfigurationSettingsRequest generates a "aws/request.Request" representing the @@ -8290,6 +8483,80 @@ func (s *ListPlatformVersionsOutput) SetPlatformSummaryList(v []*PlatformSummary return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ListTagsForResourceMessage +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resouce for which a tag list is requested. + // + // Must be the ARN of an Elastic Beanstalk environment. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ResourceTagsDescriptionMessage +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resouce for which a tag list was requested. + ResourceArn *string `type:"string"` + + // A list of tag key-value pairs. + ResourceTags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceOutput) SetResourceArn(v string) *ListTagsForResourceOutput { + s.ResourceArn = &v + return s +} + +// SetResourceTags sets the ResourceTags field's value. +func (s *ListTagsForResourceOutput) SetResourceTags(v []*Tag) *ListTagsForResourceOutput { + s.ResourceTags = v + return s +} + // Describes the properties of a Listener for the LoadBalancer. // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/Listener type Listener struct { @@ -10710,6 +10977,94 @@ func (s *UpdateEnvironmentInput) SetVersionLabel(v string) *UpdateEnvironmentInp return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/UpdateTagsForResourceMessage +type UpdateTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resouce to be updated. + // + // Must be the ARN of an Elastic Beanstalk environment. 
+ // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // A list of tags to add or update. + // + // If a key of an existing tag is added, the tag's value is updated. + TagsToAdd []*Tag `type:"list"` + + // A list of tag keys to remove. + // + // If a tag key doesn't exist, it is silently ignored. + TagsToRemove []*string `type:"list"` +} + +// String returns the string representation +func (s UpdateTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagsToAdd != nil { + for i, v := range s.TagsToAdd { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagsToAdd", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UpdateTagsForResourceInput) SetResourceArn(v string) *UpdateTagsForResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagsToAdd sets the TagsToAdd field's value. +func (s *UpdateTagsForResourceInput) SetTagsToAdd(v []*Tag) *UpdateTagsForResourceInput { + s.TagsToAdd = v + return s +} + +// SetTagsToRemove sets the TagsToRemove field's value. +func (s *UpdateTagsForResourceInput) SetTagsToRemove(v []*string) *UpdateTagsForResourceInput { + s.TagsToRemove = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/UpdateTagsForResourceOutput +type UpdateTagsForResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTagsForResourceOutput) GoString() string { + return s.String() +} + // A list of validation messages for a specified configuration template. // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ValidateConfigurationSettingsMessage type ValidateConfigurationSettingsInput struct { diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/errors.go index 0ed1bd63358..6373e870ae7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/errors.go @@ -44,6 +44,19 @@ const ( // running on it. ErrCodePlatformVersionStillReferencedException = "PlatformVersionStillReferencedException" + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // A resource doesn't exist for the specified Amazon Resource Name (ARN). + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeResourceTypeNotSupportedException for service response error code + // "ResourceTypeNotSupportedException". + // + // The type of the specified Amazon Resource Name (ARN) isn't supported for + // this operation. 
+ ErrCodeResourceTypeNotSupportedException = "ResourceTypeNotSupportedException" + // ErrCodeS3LocationNotInServiceRegionException for service response error code // "S3LocationNotInServiceRegionException". // @@ -112,4 +125,14 @@ const ( // You have exceeded the maximum number of allowed platforms associated with // the account. ErrCodeTooManyPlatformsException = "TooManyPlatformsException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The number of tags in the resource would exceed the number of tags that each + // resource can have. + // + // To calculate this, the operation considers both the number of tags the resource + // already has and the tags this operation would add if it succeeded. + ErrCodeTooManyTagsException = "TooManyTagsException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go index 7e4e7db5fc9..2ca61fa43e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go @@ -299,6 +299,97 @@ func (c *ElasticsearchService) DeleteElasticsearchDomainWithContext(ctx aws.Cont return out, req.Send() } +const opDeleteElasticsearchServiceRole = "DeleteElasticsearchServiceRole" + +// DeleteElasticsearchServiceRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteElasticsearchServiceRole operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteElasticsearchServiceRole for more information on using the DeleteElasticsearchServiceRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteElasticsearchServiceRoleRequest method. +// req, resp := client.DeleteElasticsearchServiceRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) DeleteElasticsearchServiceRoleRequest(input *DeleteElasticsearchServiceRoleInput) (req *request.Request, output *DeleteElasticsearchServiceRoleOutput) { + op := &request.Operation{ + Name: opDeleteElasticsearchServiceRole, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/es/role", + } + + if input == nil { + input = &DeleteElasticsearchServiceRoleInput{} + } + + output = &DeleteElasticsearchServiceRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteElasticsearchServiceRole API operation for Amazon Elasticsearch Service. +// +// Deletes the service-linked role that Elasticsearch Service uses to manage +// and maintain VPC domains. Role deletion will fail if any existing VPC domains +// use the role. You must delete any such Elasticsearch domains before deleting +// the role. 
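//
// A minimal sketch of the call, not part of the vendored SDK; the operation
// takes no parameters, so only an existing session is assumed.
//
//    svc := elasticsearchservice.New(session.Must(session.NewSession()))
//    _, err := svc.DeleteElasticsearchServiceRole(&elasticsearchservice.DeleteElasticsearchServiceRoleInput{})
//    if err != nil {
//        // deletion fails while any VPC domain still uses the role; see the
//        // error codes documented below
//    }
//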
See Deleting Elasticsearch Service Role (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-enabling-slr) +// in VPC Endpoints for Amazon Elasticsearch Service Domains. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation DeleteElasticsearchServiceRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBaseException "BaseException" +// An error occurred while processing the request. +// +// * ErrCodeInternalException "InternalException" +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ErrCodeValidationException "ValidationException" +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) DeleteElasticsearchServiceRole(input *DeleteElasticsearchServiceRoleInput) (*DeleteElasticsearchServiceRoleOutput, error) { + req, out := c.DeleteElasticsearchServiceRoleRequest(input) + return out, req.Send() +} + +// DeleteElasticsearchServiceRoleWithContext is the same as DeleteElasticsearchServiceRole with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteElasticsearchServiceRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) DeleteElasticsearchServiceRoleWithContext(ctx aws.Context, input *DeleteElasticsearchServiceRoleInput, opts ...request.Option) (*DeleteElasticsearchServiceRoleOutput, error) { + req, out := c.DeleteElasticsearchServiceRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain" // DescribeElasticsearchDomainRequest generates a "aws/request.Request" representing the @@ -1550,9 +1641,18 @@ type CreateElasticsearchDomainInput struct { // in the Amazon Elasticsearch Service Developer Guide. ElasticsearchVersion *string `type:"string"` + // Map of LogType and LogPublishingOption, each containing options to publish + // a given type of Elasticsearch log. + LogPublishingOptions map[string]*LogPublishingOption `type:"map"` + // Option to set time, in UTC format, of the daily automated snapshot. Default // value is 0 hours. SnapshotOptions *SnapshotOptions `type:"structure"` + + // Options to specify the subnets and security groups for VPC endpoint. For + // more information, see Creating a VPC (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-creating-vpc) + // in VPC Endpoints for Amazon Elasticsearch Service Domains + VPCOptions *VPCOptions `type:"structure"` } // String returns the string representation @@ -1617,12 +1717,24 @@ func (s *CreateElasticsearchDomainInput) SetElasticsearchVersion(v string) *Crea return s } +// SetLogPublishingOptions sets the LogPublishingOptions field's value. 
+func (s *CreateElasticsearchDomainInput) SetLogPublishingOptions(v map[string]*LogPublishingOption) *CreateElasticsearchDomainInput { + s.LogPublishingOptions = v + return s +} + // SetSnapshotOptions sets the SnapshotOptions field's value. func (s *CreateElasticsearchDomainInput) SetSnapshotOptions(v *SnapshotOptions) *CreateElasticsearchDomainInput { s.SnapshotOptions = v return s } +// SetVPCOptions sets the VPCOptions field's value. +func (s *CreateElasticsearchDomainInput) SetVPCOptions(v *VPCOptions) *CreateElasticsearchDomainInput { + s.VPCOptions = v + return s +} + // The result of a CreateElasticsearchDomain operation. Contains the status // of the newly created Elasticsearch domain. type CreateElasticsearchDomainOutput struct { @@ -1717,6 +1829,34 @@ func (s *DeleteElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomain return s } +type DeleteElasticsearchServiceRoleInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteElasticsearchServiceRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchServiceRoleInput) GoString() string { + return s.String() +} + +type DeleteElasticsearchServiceRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteElasticsearchServiceRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteElasticsearchServiceRoleOutput) GoString() string { + return s.String() +} + // Container for the parameters to the DescribeElasticsearchDomainConfig operation. // Specifies the domain name for which you want configuration information. type DescribeElasticsearchDomainConfigInput struct { @@ -2265,8 +2405,15 @@ type ElasticsearchDomainConfig struct { // String of format X.Y to specify version for the Elasticsearch domain. ElasticsearchVersion *ElasticsearchVersionStatus `type:"structure"` + // Log publishing options for the given domain. + LogPublishingOptions *LogPublishingOptionsStatus `type:"structure"` + // Specifies the SnapshotOptions for the Elasticsearch domain. SnapshotOptions *SnapshotOptionsStatus `type:"structure"` + + // The VPCOptions for the specified domain. For more information, see VPC Endpoints + // for Amazon Elasticsearch Service Domains (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html). + VPCOptions *VPCDerivedInfoStatus `type:"structure"` } // String returns the string representation @@ -2309,12 +2456,24 @@ func (s *ElasticsearchDomainConfig) SetElasticsearchVersion(v *ElasticsearchVers return s } +// SetLogPublishingOptions sets the LogPublishingOptions field's value. +func (s *ElasticsearchDomainConfig) SetLogPublishingOptions(v *LogPublishingOptionsStatus) *ElasticsearchDomainConfig { + s.LogPublishingOptions = v + return s +} + // SetSnapshotOptions sets the SnapshotOptions field's value. func (s *ElasticsearchDomainConfig) SetSnapshotOptions(v *SnapshotOptionsStatus) *ElasticsearchDomainConfig { s.SnapshotOptions = v return s } +// SetVPCOptions sets the VPCOptions field's value. +func (s *ElasticsearchDomainConfig) SetVPCOptions(v *VPCDerivedInfoStatus) *ElasticsearchDomainConfig { + s.VPCOptions = v + return s +} + // The current status of an Elasticsearch domain. type ElasticsearchDomainStatus struct { _ struct{} `type:"structure"` @@ -2371,6 +2530,13 @@ type ElasticsearchDomainStatus struct { // requests. 
Endpoint *string `type:"string"` + // Map containing the Elasticsearch domain endpoints used to submit index and + // search requests. Example key, value: 'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'. + Endpoints map[string]*string `type:"map"` + + // Log publishing options for the given domain. + LogPublishingOptions map[string]*LogPublishingOption `type:"map"` + // The status of the Elasticsearch domain configuration. True if Amazon Elasticsearch // Service is processing configuration changes. False if the configuration is // active. @@ -2378,6 +2544,10 @@ type ElasticsearchDomainStatus struct { // Specifies the status of the SnapshotOptions SnapshotOptions *SnapshotOptions `type:"structure"` + + // The VPCOptions for the specified domain. For more information, see VPC Endpoints + // for Amazon Elasticsearch Service Domains (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html). + VPCOptions *VPCDerivedInfo `type:"structure"` } // String returns the string representation @@ -2456,6 +2626,18 @@ func (s *ElasticsearchDomainStatus) SetEndpoint(v string) *ElasticsearchDomainSt return s } +// SetEndpoints sets the Endpoints field's value. +func (s *ElasticsearchDomainStatus) SetEndpoints(v map[string]*string) *ElasticsearchDomainStatus { + s.Endpoints = v + return s +} + +// SetLogPublishingOptions sets the LogPublishingOptions field's value. +func (s *ElasticsearchDomainStatus) SetLogPublishingOptions(v map[string]*LogPublishingOption) *ElasticsearchDomainStatus { + s.LogPublishingOptions = v + return s +} + // SetProcessing sets the Processing field's value. func (s *ElasticsearchDomainStatus) SetProcessing(v bool) *ElasticsearchDomainStatus { s.Processing = &v @@ -2468,6 +2650,12 @@ func (s *ElasticsearchDomainStatus) SetSnapshotOptions(v *SnapshotOptions) *Elas return s } +// SetVPCOptions sets the VPCOptions field's value. +func (s *ElasticsearchDomainStatus) SetVPCOptions(v *VPCDerivedInfo) *ElasticsearchDomainStatus { + s.VPCOptions = v + return s +} + // Status of the Elasticsearch version options for the specified Elasticsearch // domain. type ElasticsearchVersionStatus struct { @@ -2905,6 +3093,75 @@ func (s *ListTagsOutput) SetTagList(v []*Tag) *ListTagsOutput { return s } +// Log Publishing option that is set for given domain. Attributes and their details: CloudWatchLogsLogGroupArn: ARN of the Cloudwatch +// log group to which log needs to be published. +// Enabled: Whether the log publishing for given log type is enabled or not +type LogPublishingOption struct { + _ struct{} `type:"structure"` + + // ARN of the Cloudwatch log group to which log needs to be published. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // Specifies whether given log publishing option is enabled or not. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s LogPublishingOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogPublishingOption) GoString() string { + return s.String() +} + +// SetCloudWatchLogsLogGroupArn sets the CloudWatchLogsLogGroupArn field's value. +func (s *LogPublishingOption) SetCloudWatchLogsLogGroupArn(v string) *LogPublishingOption { + s.CloudWatchLogsLogGroupArn = &v + return s +} + +// SetEnabled sets the Enabled field's value. 
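//
// An illustrative sketch, not part of the vendored SDK, combining the two new
// CreateElasticsearchDomainInput fields, VPCOptions and LogPublishingOptions;
// the domain name, subnet ID, security group ID, and log-group ARN are all
// hypothetical, and the map key is the LogType enum value defined below.
//
//    svc := elasticsearchservice.New(session.Must(session.NewSession()))
//    _, err := svc.CreateElasticsearchDomain(&elasticsearchservice.CreateElasticsearchDomainInput{
//        DomainName: aws.String("my-domain"),
//        VPCOptions: &elasticsearchservice.VPCOptions{
//            SubnetIds:        []*string{aws.String("subnet-0123456789abcdef0")},
//            SecurityGroupIds: []*string{aws.String("sg-0123456789abcdef0")},
//        },
//        LogPublishingOptions: map[string]*elasticsearchservice.LogPublishingOption{
//            elasticsearchservice.LogTypeIndexSlowLogs: {
//                CloudWatchLogsLogGroupArn: aws.String("arn:aws:logs:us-east-1:123456789012:log-group:es-slow-logs"),
//                Enabled:                   aws.Bool(true),
//            },
//        },
//    })
//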
+func (s *LogPublishingOption) SetEnabled(v bool) *LogPublishingOption { + s.Enabled = &v + return s +} + +// The configured log publishing options for the domain and their current status. +type LogPublishingOptionsStatus struct { + _ struct{} `type:"structure"` + + // The log publishing options configured for the Elasticsearch domain. + Options map[string]*LogPublishingOption `type:"map"` + + // The status of the log publishing options for the Elasticsearch domain. See + // OptionStatus for the status information that's included. + Status *OptionStatus `type:"structure"` +} + +// String returns the string representation +func (s LogPublishingOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogPublishingOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *LogPublishingOptionsStatus) SetOptions(v map[string]*LogPublishingOption) *LogPublishingOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *LogPublishingOptionsStatus) SetStatus(v *OptionStatus) *LogPublishingOptionsStatus { + s.Status = v + return s +} + // Provides the current status of the entity. type OptionStatus struct { _ struct{} `type:"structure"` @@ -3282,9 +3539,18 @@ type UpdateElasticsearchDomainConfigInput struct { // The type and number of instances to instantiate for the domain cluster. ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` + // Map of LogType and LogPublishingOption, each containing options to publish + // a given type of Elasticsearch log. + LogPublishingOptions map[string]*LogPublishingOption `type:"map"` + // Option to set the time, in UTC format, for the daily automated snapshot. // Default value is 0 hours. SnapshotOptions *SnapshotOptions `type:"structure"` + + // Options to specify the subnets and security groups for VPC endpoint. For + // more information, see Creating a VPC (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-creating-vpc) + // in VPC Endpoints for Amazon Elasticsearch Service Domains + VPCOptions *VPCOptions `type:"structure"` } // String returns the string representation @@ -3343,12 +3609,24 @@ func (s *UpdateElasticsearchDomainConfigInput) SetElasticsearchClusterConfig(v * return s } +// SetLogPublishingOptions sets the LogPublishingOptions field's value. +func (s *UpdateElasticsearchDomainConfigInput) SetLogPublishingOptions(v map[string]*LogPublishingOption) *UpdateElasticsearchDomainConfigInput { + s.LogPublishingOptions = v + return s +} + // SetSnapshotOptions sets the SnapshotOptions field's value. func (s *UpdateElasticsearchDomainConfigInput) SetSnapshotOptions(v *SnapshotOptions) *UpdateElasticsearchDomainConfigInput { s.SnapshotOptions = v return s } +// SetVPCOptions sets the VPCOptions field's value. +func (s *UpdateElasticsearchDomainConfigInput) SetVPCOptions(v *VPCOptions) *UpdateElasticsearchDomainConfigInput { + s.VPCOptions = v + return s +} + // The result of an UpdateElasticsearchDomain request. Contains the status of // the Elasticsearch domain being updated. type UpdateElasticsearchDomainConfigOutput struct { @@ -3376,6 +3654,133 @@ func (s *UpdateElasticsearchDomainConfigOutput) SetDomainConfig(v *Elasticsearch return s } +// Options to specify the subnets and security groups for VPC endpoint. 
For +// more information, see VPC Endpoints for Amazon Elasticsearch Service Domains +// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html). +type VPCDerivedInfo struct { + _ struct{} `type:"structure"` + + // The availability zones for the Elasticsearch domain. Exists only if the domain + // was created with VPCOptions. + AvailabilityZones []*string `type:"list"` + + // Specifies the security groups for VPC endpoint. + SecurityGroupIds []*string `type:"list"` + + // Specifies the subnets for VPC endpoint. + SubnetIds []*string `type:"list"` + + // The VPC Id for the Elasticsearch domain. Exists only if the domain was created + // with VPCOptions. + VPCId *string `type:"string"` +} + +// String returns the string representation +func (s VPCDerivedInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPCDerivedInfo) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *VPCDerivedInfo) SetAvailabilityZones(v []*string) *VPCDerivedInfo { + s.AvailabilityZones = v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *VPCDerivedInfo) SetSecurityGroupIds(v []*string) *VPCDerivedInfo { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *VPCDerivedInfo) SetSubnetIds(v []*string) *VPCDerivedInfo { + s.SubnetIds = v + return s +} + +// SetVPCId sets the VPCId field's value. +func (s *VPCDerivedInfo) SetVPCId(v string) *VPCDerivedInfo { + s.VPCId = &v + return s +} + +// Status of the VPC options for the specified Elasticsearch domain. +type VPCDerivedInfoStatus struct { + _ struct{} `type:"structure"` + + // Specifies the VPC options for the specified Elasticsearch domain. + // + // Options is a required field + Options *VPCDerivedInfo `type:"structure" required:"true"` + + // Specifies the status of the VPC options for the specified Elasticsearch domain. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s VPCDerivedInfoStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPCDerivedInfoStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *VPCDerivedInfoStatus) SetOptions(v *VPCDerivedInfo) *VPCDerivedInfoStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *VPCDerivedInfoStatus) SetStatus(v *OptionStatus) *VPCDerivedInfoStatus { + s.Status = v + return s +} + +// Options to specify the subnets and security groups for VPC endpoint. For +// more information, see VPC Endpoints for Amazon Elasticsearch Service Domains +// (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html). +type VPCOptions struct { + _ struct{} `type:"structure"` + + // Specifies the security groups for VPC endpoint. + SecurityGroupIds []*string `type:"list"` + + // Specifies the subnets for VPC endpoint. + SubnetIds []*string `type:"list"` +} + +// String returns the string representation +func (s VPCOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VPCOptions) GoString() string { + return s.String() +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. 
+func (s *VPCOptions) SetSecurityGroupIds(v []*string) *VPCOptions { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *VPCOptions) SetSubnetIds(v []*string) *VPCOptions { + s.SubnetIds = v + return s +} + const ( // ESPartitionInstanceTypeM3MediumElasticsearch is a ESPartitionInstanceType enum value ESPartitionInstanceTypeM3MediumElasticsearch = "m3.medium.elasticsearch" @@ -3478,6 +3883,37 @@ const ( // ESPartitionInstanceTypeR416xlargeElasticsearch is a ESPartitionInstanceType enum value ESPartitionInstanceTypeR416xlargeElasticsearch = "r4.16xlarge.elasticsearch" + + // ESPartitionInstanceTypeI3LargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeI3LargeElasticsearch = "i3.large.elasticsearch" + + // ESPartitionInstanceTypeI3XlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeI3XlargeElasticsearch = "i3.xlarge.elasticsearch" + + // ESPartitionInstanceTypeI32xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeI32xlargeElasticsearch = "i3.2xlarge.elasticsearch" + + // ESPartitionInstanceTypeI34xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeI34xlargeElasticsearch = "i3.4xlarge.elasticsearch" + + // ESPartitionInstanceTypeI38xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeI38xlargeElasticsearch = "i3.8xlarge.elasticsearch" + + // ESPartitionInstanceTypeI316xlargeElasticsearch is a ESPartitionInstanceType enum value + ESPartitionInstanceTypeI316xlargeElasticsearch = "i3.16xlarge.elasticsearch" +) + +// Type of Log File, it can be one of the following: INDEX_SLOW_LOGS: Index +// slow logs contains insert requests that took more time than configured index +// query log threshold to execute. +// SEARCH_SLOW_LOGS: Search slow logs contains search queries that took more +// time than configured search query log threshold to execute. +const ( + // LogTypeIndexSlowLogs is a LogType enum value + LogTypeIndexSlowLogs = "INDEX_SLOW_LOGS" + + // LogTypeSearchSlowLogs is a LogType enum value + LogTypeSearchSlowLogs = "SEARCH_SLOW_LOGS" ) // The state of a requested change. One of the following: diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go index 57abd6ff6b6..a8ecd8e1f5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go @@ -5222,13 +5222,19 @@ type Blueprint struct { // The end-user license agreement URL for the image or blueprint. LicenseUrl *string `locationName:"licenseUrl" type:"string"` - // The minimum machine size required to run this blueprint. 0 indicates that - // the blueprint runs on all instances. + // The minimum bundle power required to run this blueprint. For example, you + // need a bundle with a power value of 500 or more to create an instance that + // uses a blueprint with a minimum power value of 500. 0 indicates that the + // blueprint runs on all instance sizes. MinPower *int64 `locationName:"minPower" type:"integer"` // The friendly name of the blueprint (e.g., Amazon Linux). Name *string `locationName:"name" type:"string"` + // The operating system platform (either Linux/Unix-based or Windows Server-based) + // of the blueprint. + Platform *string `locationName:"platform" type:"string" enum:"InstancePlatform"` + // The product URL to learn more about the image or blueprint. 
ProductUrl *string `locationName:"productUrl" type:"string"` @@ -5295,6 +5301,12 @@ func (s *Blueprint) SetName(v string) *Blueprint { return s } +// SetPlatform sets the Platform field's value. +func (s *Blueprint) SetPlatform(v string) *Blueprint { + s.Platform = &v + return s +} + // SetProductUrl sets the ProductUrl field's value. func (s *Blueprint) SetProductUrl(v string) *Blueprint { s.ProductUrl = &v @@ -5343,7 +5355,11 @@ type Bundle struct { // A friendly name for the bundle (e.g., Micro). Name *string `locationName:"name" type:"string"` - // The power of the bundle (e.g., 500). + // A numeric value that represents the power of the bundle (e.g., 500). You + // can use the bundle's power value in conjunction with a blueprint's minimum + // power value to determine whether the blueprint will run on the bundle. For + // example, you need a bundle with a power value of 500 or more to create an + // instance that uses a blueprint with a minimum power value of 500. Power *int64 `locationName:"power" type:"integer"` // The price in US dollars (e.g., 5.0). @@ -5352,6 +5368,12 @@ type Bundle struct { // The amount of RAM in GB (e.g., 2.0). RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"` + // The operating system platform (Linux/Unix-based or Windows Server-based) + // that the bundle supports. You can only launch a WINDOWS bundle on a blueprint + // that supports the WINDOWS platform. LINUX_UNIX blueprints require a LINUX_UNIX + // bundle. + SupportedPlatforms []*string `locationName:"supportedPlatforms" type:"list"` + // The data transfer rate per month in GB (e.g., 2000). TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"` } @@ -5420,6 +5442,12 @@ func (s *Bundle) SetRamSizeInGb(v float64) *Bundle { return s } +// SetSupportedPlatforms sets the SupportedPlatforms field's value. +func (s *Bundle) SetSupportedPlatforms(v []*string) *Bundle { + s.SupportedPlatforms = v + return s +} + // SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value. func (s *Bundle) SetTransferPerMonthInGb(v int64) *Bundle { s.TransferPerMonthInGb = &v @@ -5913,7 +5941,7 @@ type CreateInstancesInput struct { // Depending on the machine image you choose, the command to get software on // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu // use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide - // (http://lightsail.aws.amazon.com/ls/docs/getting-started/articles/pre-installed-apps). + // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). UserData *string `locationName:"userData" type:"string"` } @@ -8562,9 +8590,26 @@ type InstanceAccessDetails struct { // The public IP address of the Amazon Lightsail instance. IpAddress *string `locationName:"ipAddress" type:"string"` - // For RDP access, the temporary password of the Amazon EC2 instance. + // For RDP access, the password for your Amazon Lightsail instance. Password + // will be an empty string if the password for your new instance is not ready + // yet. When you create an instance, it can take up to 15 minutes for the instance + // to be ready. + // + // If you create an instance using any key pair other than the default (LightsailDefaultKeyPair), + // password will always be an empty string. + // + // If you change the Administrator password on the instance, Lightsail will + // continue to return the original password value. 
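//
// A minimal sketch, not part of the vendored SDK, of pairing a blueprint with
// a compatible bundle using the rule documented above: the bundle's power must
// be at least the blueprint's minPower, and the bundle's supportedPlatforms
// must include the blueprint's platform. Error handling and empty-list checks
// are omitted for brevity.
//
//    svc := lightsail.New(session.Must(session.NewSession()))
//    bps, _ := svc.GetBlueprints(&lightsail.GetBlueprintsInput{})
//    bds, _ := svc.GetBundles(&lightsail.GetBundlesInput{})
//    compatible := func(bp *lightsail.Blueprint, b *lightsail.Bundle) bool {
//        if aws.Int64Value(b.Power) < aws.Int64Value(bp.MinPower) {
//            return false // bundle too small for this blueprint
//        }
//        for _, p := range b.SupportedPlatforms {
//            if aws.StringValue(p) == aws.StringValue(bp.Platform) {
//                return true // platforms match (LINUX_UNIX or WINDOWS)
//            }
//        }
//        return false
//    }
//    _ = compatible(bps.Blueprints[0], bds.Bundles[0])
//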
When accessing the instance + // using RDP, you need to manually enter the Administrator password after changing + // it from the default. Password *string `locationName:"password" type:"string"` + // For a Windows Server-based instance, an object with the data you can use + // to retrieve your password. This is only needed if password is empty and the + // instance is not new (and therefore the password is not ready yet). When you + // create an instance, it can take up to 15 minutes for the instance to be ready. + PasswordData *PasswordData `locationName:"passwordData" type:"structure"` + // For SSH access, the temporary private key. For OpenSSH clients (e.g., command // line SSH), you should save this value to tempkey). PrivateKey *string `locationName:"privateKey" type:"string"` @@ -8616,6 +8661,12 @@ func (s *InstanceAccessDetails) SetPassword(v string) *InstanceAccessDetails { return s } +// SetPasswordData sets the PasswordData field's value. +func (s *InstanceAccessDetails) SetPasswordData(v *PasswordData) *InstanceAccessDetails { + s.PasswordData = v + return s +} + // SetPrivateKey sets the PrivateKey field's value. func (s *InstanceAccessDetails) SetPrivateKey(v string) *InstanceAccessDetails { s.PrivateKey = &v @@ -9475,6 +9526,59 @@ func (s *Operation) SetStatusChangedAt(v time.Time) *Operation { return s } +// The password data for the Windows Server-based instance, including the ciphertext +// and the key pair name. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PasswordData +type PasswordData struct { + _ struct{} `type:"structure"` + + // The encrypted password. Ciphertext will be an empty string if access to your + // new instance is not ready yet. When you create an instance, it can take up + // to 15 minutes for the instance to be ready. + // + // If you use the default key pair (LightsailDefaultKeyPair), the decrypted + // password will be available in the password field. + // + // If you are using a custom key pair, you need to use your own means of decryption. + // + // If you change the Administrator password on the instance, Lightsail will + // continue to return the original ciphertext value. When accessing the instance + // using RDP, you need to manually enter the Administrator password after changing + // it from the default. + Ciphertext *string `locationName:"ciphertext" type:"string"` + + // The name of the key pair that you used when creating your instance. If no + // key pair name was specified when creating the instance, Lightsail uses the + // default key pair (LightsailDefaultKeyPair). + // + // If you are using a custom key pair, you need to use your own means of decrypting + // your password using the ciphertext. Lightsail creates the ciphertext by encrypting + // your password with the public key part of this key pair. + KeyPairName *string `locationName:"keyPairName" type:"string"` +} + +// String returns the string representation +func (s PasswordData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PasswordData) GoString() string { + return s.String() +} + +// SetCiphertext sets the Ciphertext field's value. +func (s *PasswordData) SetCiphertext(v string) *PasswordData { + s.Ciphertext = &v + return s +} + +// SetKeyPairName sets the KeyPairName field's value. 
+func (s *PasswordData) SetKeyPairName(v string) *PasswordData { + s.KeyPairName = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpcRequest type PeerVpcInput struct { _ struct{} `type:"structure"` @@ -10242,6 +10346,14 @@ const ( InstanceMetricNameStatusCheckFailedSystem = "StatusCheckFailed_System" ) +const ( + // InstancePlatformLinuxUnix is a InstancePlatform enum value + InstancePlatformLinuxUnix = "LINUX_UNIX" + + // InstancePlatformWindows is a InstancePlatform enum value + InstancePlatformWindows = "WINDOWS" +) + const ( // InstanceSnapshotStatePending is a InstanceSnapshotState enum value InstanceSnapshotStatePending = "pending" diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index afb4d200bbd..cefb70a4d3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -6214,6 +6214,90 @@ func (c *RDS) DescribeSourceRegionsWithContext(ctx aws.Context, input *DescribeS return out, req.Send() } +const opDescribeValidDBInstanceModifications = "DescribeValidDBInstanceModifications" + +// DescribeValidDBInstanceModificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeValidDBInstanceModifications operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeValidDBInstanceModifications for more information on using the DescribeValidDBInstanceModifications +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeValidDBInstanceModificationsRequest method. +// req, resp := client.DescribeValidDBInstanceModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeValidDBInstanceModifications +func (c *RDS) DescribeValidDBInstanceModificationsRequest(input *DescribeValidDBInstanceModificationsInput) (req *request.Request, output *DescribeValidDBInstanceModificationsOutput) { + op := &request.Operation{ + Name: opDescribeValidDBInstanceModifications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeValidDBInstanceModificationsInput{} + } + + output = &DescribeValidDBInstanceModificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeValidDBInstanceModifications API operation for Amazon Relational Database Service. +// +// You can call DescribeValidDBInstanceModifications to learn what modifications +// you can make to your DB instance. You can use this information when you call +// ModifyDBInstance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeValidDBInstanceModifications for usage and error information. 
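//
// A minimal sketch, not part of the vendored SDK, of the recommended sequence:
// query the valid modifications first, then call ModifyDBInstance. The
// instance identifier is hypothetical and the result is only printed here.
//
//    svc := rds.New(session.Must(session.NewSession()))
//    mods, err := svc.DescribeValidDBInstanceModifications(&rds.DescribeValidDBInstanceModificationsInput{
//        DBInstanceIdentifier: aws.String("mydbinstance"),
//    })
//    if err == nil {
//        fmt.Println(mods) // inspect the allowed settings before building the ModifyDBInstanceInput
//    }
//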
+// +// Returned Error Codes: +// * ErrCodeDBInstanceNotFoundFault "DBInstanceNotFound" +// DBInstanceIdentifier does not refer to an existing DB instance. +// +// * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" +// The specified DB instance is not in the available state. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeValidDBInstanceModifications +func (c *RDS) DescribeValidDBInstanceModifications(input *DescribeValidDBInstanceModificationsInput) (*DescribeValidDBInstanceModificationsOutput, error) { + req, out := c.DescribeValidDBInstanceModificationsRequest(input) + return out, req.Send() +} + +// DescribeValidDBInstanceModificationsWithContext is the same as DescribeValidDBInstanceModifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeValidDBInstanceModifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeValidDBInstanceModificationsWithContext(ctx aws.Context, input *DescribeValidDBInstanceModificationsInput, opts ...request.Option) (*DescribeValidDBInstanceModificationsOutput, error) { + req, out := c.DescribeValidDBInstanceModificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDownloadDBLogFilePortion = "DownloadDBLogFilePortion" // DownloadDBLogFilePortionRequest generates a "aws/request.Request" representing the @@ -6907,7 +6991,8 @@ func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *reques // // Modifies settings for a DB instance. You can change one or more database // configuration parameters by specifying these parameters and the new values -// in the request. +// in the request. To learn what modifications you can make to your DB instance, +// call DescribeValidDBInstanceModifications before you call ModifyDBInstance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9508,9 +9593,7 @@ func (s AddRoleToDBClusterOutput) GoString() string { type AddSourceIdentifierToSubscriptionInput struct { _ struct{} `type:"structure"` - // The identifier of the event source to be added. An identifier must begin - // with a letter and must contain only ASCII letters, digits, and hyphens; it - // cannot end with a hyphen or contain two consecutive hyphens. + // The identifier of the event source to be added. // // Constraints: // @@ -10061,7 +10144,7 @@ type CopyDBClusterParameterGroupInput struct { // // * Cannot be null, empty, or blank // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -10228,12 +10311,6 @@ type CopyDBClusterSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. - // - // * First character must be a letter. - // - // * Cannot end with a hyphen or contain two consecutive hyphens. - // // * Must specify a valid system snapshot in the "available" state. 
// // * If the source snapshot is in the same AWS Region as the copy, specify @@ -10261,7 +10338,7 @@ type CopyDBClusterSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // // * First character must be a letter. // @@ -10410,7 +10487,7 @@ type CopyDBParameterGroupInput struct { // // * Cannot be null, empty, or blank // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -10625,7 +10702,7 @@ type CopyDBSnapshotInput struct { // // * Cannot be null, empty, or blank // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -10782,7 +10859,7 @@ type CopyOptionGroupInput struct { // // * Cannot be null, empty, or blank // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -10897,7 +10974,7 @@ type CreateDBClusterInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // // * First character must be a letter. // @@ -10913,17 +10990,13 @@ type CreateDBClusterInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBClusterParameterGroup. DBClusterParameterGroupName *string `type:"string"` // A DB subnet group to associate with this DB cluster. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: Must match the name of an existing DBSubnetGroup. Must not be + // default. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -10990,7 +11063,7 @@ type CreateDBClusterInput struct { // // Constraints: // - // * Must be 1 to 16 alphanumeric characters. + // * Must be 1 to 16 letters or numbers. // // * First character must be a letter. // @@ -11309,11 +11382,7 @@ type CreateDBClusterParameterGroupInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the name of an existing DBClusterParameterGroup. // // This value is stored as a lowercase string. // @@ -11428,11 +11497,7 @@ type CreateDBClusterSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. - // - // * First character must be a letter. - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * Must match the identifier of an existing DBCluster. // // Example: my-cluster1 // @@ -11444,7 +11509,7 @@ type CreateDBClusterSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // // * First character must be a letter. 
// @@ -11687,7 +11752,7 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // // * First character must be a letter. // @@ -11710,7 +11775,7 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must contain 1 to 64 alphanumeric characters + // * Must contain 1 to 64 letters or numbers. // // * Cannot be a word reserved by the specified database engine // @@ -11721,7 +11786,7 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must contain 1 to 64 alphanumeric characters + // * Must contain 1 to 64 letters or numbers. // // * Cannot be a word reserved by the specified database engine // @@ -11733,7 +11798,7 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must contain 1 to 63 alphanumeric characters + // * Must contain 1 to 63 letters, numbers, or underscores. // // * Must begin with a letter or an underscore. Subsequent characters can // be letters, underscores, or digits (0-9). @@ -11764,7 +11829,7 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must contain 1 to 64 alphanumeric characters + // * Must contain 1 to 64 letters or numbers. // // * Cannot be a word reserved by the specified database engine DBName *string `type:"string"` @@ -11775,7 +11840,7 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters, numbers, or hyphens. // // * First character must be a letter // @@ -12065,7 +12130,9 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 16 alphanumeric characters. + // * Required for MariaDB. + // + // * Must be 1 to 16 letters or numbers. // // * Cannot be a reserved word for the chosen database engine. // @@ -12073,9 +12140,11 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 128 alphanumeric characters. + // * Required for SQL Server. // - // * First character must be a letter. + // * Must be 1 to 128 letters or numbers. + // + // * The first character must be a letter. // // * Cannot be a reserved word for the chosen database engine. // @@ -12083,7 +12152,9 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 16 alphanumeric characters. + // * Required for MySQL. + // + // * Must be 1 to 16 letters or numbers. // // * First character must be a letter. // @@ -12093,7 +12164,9 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 30 alphanumeric characters. + // * Required for Oracle. + // + // * Must be 1 to 30 letters or numbers. // // * First character must be a letter. // @@ -12103,7 +12176,9 @@ type CreateDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 63 alphanumeric characters. + // * Required for PostgreSQL. + // + // * Must be 1 to 63 letters or numbers. // // * First character must be a letter. // @@ -12663,6 +12738,8 @@ type CreateDBInstanceReadReplicaInput struct { // * Can only be specified if the source DB instance identifier specifies // a DB instance in another AWS Region. // + // * If supplied, must match the name of an existing DBSubnetGroup. + // // * The specified DB subnet group must be in the same AWS Region in which // the operation is running. // @@ -12675,9 +12752,6 @@ type CreateDBInstanceReadReplicaInput struct { // Not specify a DB subnet group. All these Read Replicas will be created outside // of any VPC. 
// - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. - // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -13070,7 +13144,7 @@ type CreateDBParameterGroupInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters, numbers, or hyphens. // // * First character must be a letter // @@ -13184,7 +13258,7 @@ type CreateDBSecurityGroupInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters, numbers, or hyphens. // // * First character must be a letter // @@ -13284,15 +13358,11 @@ func (s *CreateDBSecurityGroupOutput) SetDBSecurityGroup(v *DBSecurityGroup) *Cr type CreateDBSnapshotInput struct { _ struct{} `type:"structure"` - // The DB instance identifier. This is the unique key that identifies a DB instance. + // The identifier of the DB instance that you want to create the snapshot of. // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBInstance. // // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` @@ -13303,7 +13373,7 @@ type CreateDBSnapshotInput struct { // // * Cannot be null, empty, or blank // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -13403,8 +13473,8 @@ type CreateDBSubnetGroupInput struct { // The name for the DB subnet group. This value is stored as a lowercase string. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: Must contain no more than 255 letters, numbers, periods, underscores, + // spaces, or hyphens. Must not be default. // // Example: mySubnetgroup // @@ -13690,7 +13760,7 @@ type CreateOptionGroupInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters or hyphens + // * Must be 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -14325,7 +14395,7 @@ type DBClusterParameterGroupNameMessage struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters or numbers. // // * First character must be a letter // @@ -16112,11 +16182,7 @@ type DeleteDBClusterInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match an existing DBClusterIdentifier. // // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` @@ -16129,7 +16195,7 @@ type DeleteDBClusterInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters, numbers, or hyphens. 
// // * First character must be a letter // @@ -16370,11 +16436,7 @@ type DeleteDBInstanceInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the name of an existing DB instance. // // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` @@ -16387,7 +16449,7 @@ type DeleteDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters or numbers. // // * First character must be a letter // @@ -16562,7 +16624,7 @@ type DeleteDBSecurityGroupInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 letters, numbers, or hyphens. // // * First character must be a letter // @@ -16700,8 +16762,8 @@ type DeleteDBSubnetGroupInput struct { // // Constraints: // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: Must match the name of an existing DBSubnetGroup. Must not be + // default. // // Example: mySubnetgroup // @@ -16924,11 +16986,7 @@ type DescribeCertificatesInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match an existing CertificateIdentifier. CertificateIdentifier *string `type:"string"` // This parameter is not currently supported. @@ -17047,11 +17105,7 @@ type DescribeDBClusterParameterGroupsInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBClusterParameterGroup. DBClusterParameterGroupName *string `type:"string"` // This parameter is not currently supported. @@ -17170,11 +17224,7 @@ type DescribeDBClusterParametersInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBClusterParameterGroup. // // DBClusterParameterGroupName is a required field DBClusterParameterGroupName *string `type:"string" required:"true"` @@ -17379,11 +17429,7 @@ type DescribeDBClusterSnapshotsInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the identifier of an existing DBCluster. DBClusterIdentifier *string `type:"string"` // A specific DB cluster snapshot identifier to describe. This parameter cannot @@ -17392,11 +17438,7 @@ type DescribeDBClusterSnapshotsInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the identifier of an existing DBClusterSnapshot. // // * If this identifier is for an automated snapshot, the SnapshotType parameter // must also be specified. 
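// Editor's note: the hunks above vendor the new RDS DescribeValidDBInstanceModifications
// operation. The following is a minimal, illustrative usage sketch and is not part of
// this diff; the session setup and the "mydbinstance" identifier are assumptions made
// for the example.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	// Build an RDS client from the default credential/region chain.
	svc := rds.New(session.Must(session.NewSession()))

	// Ask RDS which modifications are currently valid for this instance.
	out, err := svc.DescribeValidDBInstanceModifications(&rds.DescribeValidDBInstanceModificationsInput{
		DBInstanceIdentifier: aws.String("mydbinstance"), // hypothetical identifier
	})
	if err != nil {
		log.Fatal(err)
	}

	// Inspect the valid storage options before calling ModifyDBInstance.
	if msg := out.ValidDBInstanceModificationsMessage; msg != nil {
		for _, opt := range msg.Storage {
			fmt.Println("storage type:", aws.StringValue(opt.StorageType))
			for _, r := range opt.StorageSize {
				fmt.Printf("  valid size: %d-%d GiB\n", aws.Int64Value(r.From), aws.Int64Value(r.To))
			}
		}
	}
}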
@@ -17587,11 +17629,7 @@ type DescribeDBClustersInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match an existing DBClusterIdentifier. DBClusterIdentifier *string `type:"string"` // A filter that specifies one or more DB clusters to describe. @@ -17715,11 +17753,7 @@ type DescribeDBEngineVersionsInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match an existing DBParameterGroupFamily. DBParameterGroupFamily *string `type:"string"` // Indicates that only the default version of the specified engine or engine @@ -17892,11 +17926,7 @@ type DescribeDBInstancesInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the identifier of an existing DBInstance. DBInstanceIdentifier *string `type:"string"` // A filter that specifies one or more DB instances to describe. @@ -18071,11 +18101,7 @@ type DescribeDBLogFilesInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBInstance. // // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` @@ -18222,11 +18248,7 @@ type DescribeDBParameterGroupsInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBParameterGroup. DBParameterGroupName *string `type:"string"` // This parameter is not currently supported. @@ -18346,11 +18368,7 @@ type DescribeDBParametersInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBParameterGroup. // // DBParameterGroupName is a required field DBParameterGroupName *string `type:"string" required:"true"` @@ -18674,11 +18692,7 @@ type DescribeDBSnapshotsInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the identifier of an existing DBInstance. DBInstanceIdentifier *string `type:"string"` // A specific DB snapshot identifier to describe. This parameter cannot be used @@ -18687,11 +18701,7 @@ type DescribeDBSnapshotsInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters. - // - // * First character must be a letter. - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * If supplied, must match the identifier of an existing DBSnapshot. // // * If this identifier is for an automated snapshot, the SnapshotType parameter // must also be specified.
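// Editor's note: the ModifyDBInstance documentation updated earlier in this file now
// recommends calling DescribeValidDBInstanceModifications first. A hedged follow-up
// sketch, building on the previous example; the AllocatedStorage field and the size
// value are assumptions for illustration and should come from a Range reported by the
// Describe call, not from this diff.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// modifyStorage applies a storage size that DescribeValidDBInstanceModifications
// reported as valid for the given instance.
func modifyStorage(svc *rds.RDS, id string, sizeGiB int64) error {
	_, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String(id),
		AllocatedStorage:     aws.Int64(sizeGiB), // should fall inside a reported valid Range
		ApplyImmediately:     aws.Bool(true),     // otherwise applied in the next maintenance window
	})
	return err
}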
@@ -20621,6 +20631,71 @@ func (s *DescribeSourceRegionsOutput) SetSourceRegions(v []*SourceRegion) *Descr return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeValidDBInstanceModificationsMessage +type DescribeValidDBInstanceModificationsInput struct { + _ struct{} `type:"structure"` + + // The customer identifier or the ARN of your DB instance. + // + // DBInstanceIdentifier is a required field + DBInstanceIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeValidDBInstanceModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeValidDBInstanceModificationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeValidDBInstanceModificationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeValidDBInstanceModificationsInput"} + if s.DBInstanceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBInstanceIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDBInstanceIdentifier sets the DBInstanceIdentifier field's value. +func (s *DescribeValidDBInstanceModificationsInput) SetDBInstanceIdentifier(v string) *DescribeValidDBInstanceModificationsInput { + s.DBInstanceIdentifier = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeValidDBInstanceModificationsResult +type DescribeValidDBInstanceModificationsOutput struct { + _ struct{} `type:"structure"` + + // Information about valid modifications that you can make to your DB instance. + // Contains the result of a successful call to the DescribeValidDBInstanceModifications + // action. You can use this information when you call ModifyDBInstance. + ValidDBInstanceModificationsMessage *ValidDBInstanceModificationsMessage `type:"structure"` +} + +// String returns the string representation +func (s DescribeValidDBInstanceModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeValidDBInstanceModificationsOutput) GoString() string { + return s.String() +} + +// SetValidDBInstanceModificationsMessage sets the ValidDBInstanceModificationsMessage field's value. +func (s *DescribeValidDBInstanceModificationsOutput) SetValidDBInstanceModificationsMessage(v *ValidDBInstanceModificationsMessage) *DescribeValidDBInstanceModificationsOutput { + s.ValidDBInstanceModificationsMessage = v + return s +} + // An Active Directory Domain membership record associated with the DB instance. // Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DomainMembership type DomainMembership struct { @@ -20675,6 +20750,40 @@ func (s *DomainMembership) SetStatus(v string) *DomainMembership { return s } +// A range of double values. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DoubleRange +type DoubleRange struct { + _ struct{} `type:"structure"` + + // The minimum value in the range. + From *float64 `type:"double"` + + // The maximum value in the range. 
+ To *float64 `type:"double"` +} + +// String returns the string representation +func (s DoubleRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DoubleRange) GoString() string { + return s.String() +} + +// SetFrom sets the From field's value. +func (s *DoubleRange) SetFrom(v float64) *DoubleRange { + s.From = &v + return s +} + +// SetTo sets the To field's value. +func (s *DoubleRange) SetTo(v float64) *DoubleRange { + s.To = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DownloadDBLogFilePortionMessage type DownloadDBLogFilePortionInput struct { _ struct{} `type:"structure"` @@ -20684,11 +20793,7 @@ type DownloadDBLogFilePortionInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBInstance. // // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` @@ -21208,11 +21313,7 @@ type FailoverDBClusterInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBCluster. DBClusterIdentifier *string `type:"string"` // The name of the instance to promote to the primary instance. @@ -21491,13 +21592,7 @@ type ModifyDBClusterInput struct { // // Constraints: // - // * Must be the identifier for an existing DB cluster. - // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. - // - // * First character must be a letter. - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * Must match the identifier of an existing DBCluster. // // DBClusterIdentifier is a required field DBClusterIdentifier *string `type:"string" required:"true"` @@ -21522,9 +21617,9 @@ type ModifyDBClusterInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens + // * Must contain from 1 to 63 letters, numbers, or hyphens // - // * First character must be a letter + // * The first character must be a letter // // * Cannot end with a hyphen or contain two consecutive hyphens // @@ -22058,13 +22153,7 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // * Must be the identifier for an existing DB instance - // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBInstance. // // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` @@ -22136,11 +22225,7 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match existing DBSecurityGroups. DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` // The new DB subnet group for the DB instance. 
You can use this parameter to @@ -22152,8 +22237,7 @@ type ModifyDBInstanceInput struct { // is applied during the next maintenance window, unless you specify true for // the ApplyImmediately parameter. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. + // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetGroup DBSubnetGroupName *string `type:"string"` @@ -22255,9 +22339,25 @@ type ModifyDBInstanceInput struct { // // Default: Uses existing setting // - // Constraints: Must be 8 to 41 alphanumeric characters (MySQL, MariaDB, and - // Amazon Aurora), 8 to 30 alphanumeric characters (Oracle), or 8 to 128 alphanumeric - // characters (SQL Server). + // MariaDB + // + // Constraints: Must contain from 8 to 41 characters. + // + // Microsoft SQL Server + // + // Constraints: Must contain from 8 to 128 characters. + // + // MySQL + // + // Constraints: Must contain from 8 to 41 characters. + // + // Oracle + // + // Constraints: Must contain from 8 to 30 characters. + // + // PostgreSQL + // + // Constraints: Must contain from 8 to 128 characters. // // Amazon RDS API actions never return the password, so this action provides // a way to regain access to a primary instance user if the password is lost. @@ -22298,11 +22398,13 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens + // * Must contain from 1 to 63 letters, numbers, or hyphens. // - // * First character must be a letter + // * The first character must be a letter. // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: mydbinstance NewDBInstanceIdentifier *string `type:"string"` // Indicates that the DB instance should be associated with the specified option @@ -22410,11 +22512,7 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match existing VpcSecurityGroupIds. VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` } @@ -22695,13 +22793,7 @@ type ModifyDBParameterGroupInput struct { // // Constraints: // - // * Must be the name of an existing DB parameter group - // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBParameterGroup. // // DBParameterGroupName is a required field DBParameterGroupName *string `type:"string" required:"true"` @@ -22990,9 +23082,10 @@ type ModifyDBSubnetGroupInput struct { DBSubnetGroupDescription *string `type:"string"` // The name for the DB subnet group. This value is stored as a lowercase string. + // You can't modify the default subnet group. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: Must match the name of an existing DBSubnetGroup. Must not be + // default. // // Example: mySubnetgroup // @@ -24477,11 +24570,7 @@ type PromoteReadReplicaDBClusterInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. - // - // * First character must be a letter. 
- // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * Must match the identifier of an existing DBCluster Read Replica. // // Example: my-cluster-replica1 // @@ -24575,13 +24664,7 @@ type PromoteReadReplicaInput struct { // // Constraints: // - // * Must be the identifier for an existing Read Replica DB instance - // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing Read Replica DB instance. // // Example: mydbinstance // @@ -24782,6 +24865,52 @@ func (s *PurchaseReservedDBInstancesOfferingOutput) SetReservedDBInstance(v *Res return s } +// A range of integer values. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/Range +type Range struct { + _ struct{} `type:"structure"` + + // The minimum value in the range. + From *int64 `type:"integer"` + + // The step value for the range. For example, if you have a range of 5,000 to + // 10,000, with a step value of 1,000, the valid values start at 5,000 and step + // up by 1,000. Even though 7,500 is within the range, it isn't a valid value + // for the range. The valid values are 5,000, 6,000, 7,000, 8,000... + Step *int64 `type:"integer"` + + // The maximum value in the range. + To *int64 `type:"integer"` +} + +// String returns the string representation +func (s Range) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Range) GoString() string { + return s.String() +} + +// SetFrom sets the From field's value. +func (s *Range) SetFrom(v int64) *Range { + s.From = &v + return s +} + +// SetStep sets the Step field's value. +func (s *Range) SetStep(v int64) *Range { + s.Step = &v + return s +} + +// SetTo sets the To field's value. +func (s *Range) SetTo(v int64) *Range { + s.To = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RebootDBInstanceMessage type RebootDBInstanceInput struct { _ struct{} `type:"structure"` @@ -24790,11 +24919,7 @@ type RebootDBInstanceInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBInstance. // // DBInstanceIdentifier is a required field DBInstanceIdentifier *string `type:"string" required:"true"` @@ -25459,11 +25584,7 @@ type ResetDBParameterGroupInput struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the name of an existing DBParameterGroup. // // DBParameterGroupName is a required field DBParameterGroupName *string `type:"string" required:"true"` @@ -25604,7 +25725,7 @@ type RestoreDBClusterFromS3Input struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // // * First character must be a letter. 
// @@ -25620,17 +25741,12 @@ type RestoreDBClusterFromS3Input struct { // // Constraints: // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * If supplied, must match the name of an existing DBClusterParameterGroup. DBClusterParameterGroupName *string `type:"string"` // A DB subnet group to associate with the restored DB cluster. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -25683,7 +25799,7 @@ type RestoreDBClusterFromS3Input struct { // // Constraints: // - // * Must be 1 to 16 alphanumeric characters. + // * Must be 1 to 16 letters or numbers. // // * First character must be a letter. // @@ -26030,7 +26146,7 @@ type RestoreDBClusterFromSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // // * First character must be a letter // @@ -26043,8 +26159,7 @@ type RestoreDBClusterFromSnapshotInput struct { // The name of the DB subnet group to use for the new DB cluster. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -26107,11 +26222,7 @@ type RestoreDBClusterFromSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing Snapshot. // // SnapshotIdentifier is a required field SnapshotIdentifier *string `type:"string" required:"true"` @@ -26276,7 +26387,7 @@ type RestoreDBClusterToPointInTimeInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens + // * Must contain from 1 to 63 letters, numbers, or hyphens // // * First character must be a letter // @@ -26287,8 +26398,7 @@ type RestoreDBClusterToPointInTimeInput struct { // The DB subnet group name to use for the new DB cluster. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -26372,13 +26482,7 @@ type RestoreDBClusterToPointInTimeInput struct { // // Constraints: // - // * Must be the identifier of an existing database instance - // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBCluster. 
// // SourceDBClusterIdentifier is a required field SourceDBClusterIdentifier *string `type:"string" required:"true"` @@ -26570,7 +26674,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens + // * Must contain from 1 to 63 numbers, letters, or hyphens // // * First character must be a letter // @@ -26590,22 +26694,17 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // Constraints: // - // * Must contain from 1 to 255 alphanumeric characters or hyphens + // * Must match the identifier of an existing DBSnapshot. // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens - // - // If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier - // must be the ARN of the shared DB snapshot. + // * If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier + // must be the ARN of the shared DB snapshot. // // DBSnapshotIdentifier is a required field DBSnapshotIdentifier *string `type:"string" required:"true"` // The DB subnet group name to use for the new instance. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: If supplied, must match the name of an existing DBSubnetGroup. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -26638,8 +26737,31 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // Constraint: Must be compatible with the engine of the source. You can restore // a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. // - // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee - // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + // Valid Values: + // + // * aurora + // + // * mariadb + // + // * mysql + // + // * oracle-ee + // + // * oracle-se2 + // + // * oracle-se1 + // + // * oracle-se + // + // * postgres + // + // * sqlserver-ee + // + // * sqlserver-se + // + // * sqlserver-ex + // + // * sqlserver-web Engine *string `type:"string"` // Specifies the amount of provisioned IOPS for the DB instance, expressed in @@ -26955,8 +27077,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // The DB subnet group name to use for the new instance. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: If supplied, must match the name of an existing DBSubnetGroup. 
// // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -26988,8 +27109,31 @@ type RestoreDBInstanceToPointInTimeInput struct { // // Constraint: Must be compatible with the engine of the source // - // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee - // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora + // Valid Values: + // + // * aurora + // + // * mariadb + // + // * mysql + // + // * oracle-ee + // + // * oracle-se2 + // + // * oracle-se1 + // + // * oracle-se + // + // * postgres + // + // * sqlserver-ee + // + // * sqlserver-se + // + // * sqlserver-ex + // + // * sqlserver-web Engine *string `type:"string"` // The amount of Provisioned IOPS (input/output operations per second) to be @@ -27064,13 +27208,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // Constraints: // - // * Must be the identifier of an existing database instance - // - // * Must contain from 1 to 63 alphanumeric characters or hyphens - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Must match the identifier of an existing DBInstance. // // SourceDBInstanceIdentifier is a required field SourceDBInstanceIdentifier *string `type:"string" required:"true"` @@ -27091,7 +27229,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens + // * Must contain from 1 to 63 letters, numbers, or hyphens // // * First character must be a letter // @@ -27824,6 +27962,89 @@ func (s *UpgradeTarget) SetIsMajorVersionUpgrade(v bool) *UpgradeTarget { return s } +// Information about valid modifications that you can make to your DB instance. +// Contains the result of a successful call to the DescribeValidDBInstanceModifications +// action. You can use this information when you call ModifyDBInstance. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ValidDBInstanceModificationsMessage +type ValidDBInstanceModificationsMessage struct { + _ struct{} `type:"structure"` + + // Valid storage options for your DB instance. + Storage []*ValidStorageOptions `locationNameList:"ValidStorageOptions" type:"list"` +} + +// String returns the string representation +func (s ValidDBInstanceModificationsMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidDBInstanceModificationsMessage) GoString() string { + return s.String() +} + +// SetStorage sets the Storage field's value. +func (s *ValidDBInstanceModificationsMessage) SetStorage(v []*ValidStorageOptions) *ValidDBInstanceModificationsMessage { + s.Storage = v + return s +} + +// Information about valid modifications that you can make to your DB instance. +// Contains the result of a successful call to the DescribeValidDBInstanceModifications +// action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ValidStorageOptions +type ValidStorageOptions struct { + _ struct{} `type:"structure"` + + // The valid range of Provisioned IOPS to gigabytes of storage multiplier. For + // example, 3-10, which means that provisioned IOPS can be between 3 and 10 + // times storage. + IopsToStorageRatio []*DoubleRange `locationNameList:"DoubleRange" type:"list"` + + // The valid range of provisioned IOPS. For example, 1000-20000. + ProvisionedIops []*Range `locationNameList:"Range" type:"list"` + + // The valid range of storage in gigabytes. 
For example, 100 to 6144. + StorageSize []*Range `locationNameList:"Range" type:"list"` + + // The valid storage types for your DB instance. For example, gp2, io1. + StorageType *string `type:"string"` +} + +// String returns the string representation +func (s ValidStorageOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidStorageOptions) GoString() string { + return s.String() +} + +// SetIopsToStorageRatio sets the IopsToStorageRatio field's value. +func (s *ValidStorageOptions) SetIopsToStorageRatio(v []*DoubleRange) *ValidStorageOptions { + s.IopsToStorageRatio = v + return s +} + +// SetProvisionedIops sets the ProvisionedIops field's value. +func (s *ValidStorageOptions) SetProvisionedIops(v []*Range) *ValidStorageOptions { + s.ProvisionedIops = v + return s +} + +// SetStorageSize sets the StorageSize field's value. +func (s *ValidStorageOptions) SetStorageSize(v []*Range) *ValidStorageOptions { + s.StorageSize = v + return s +} + +// SetStorageType sets the StorageType field's value. +func (s *ValidStorageOptions) SetStorageType(v string) *ValidStorageOptions { + s.StorageType = &v + return s +} + // This data type is used as a response element for queries on VPC security // group membership. // Please also see https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/VpcSecurityGroupMembership diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go index cd45bcdc3cc..0c21e88bbae 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go @@ -150,3 +150,145 @@ func (c *RDS) WaitUntilDBInstanceDeletedWithContext(ctx aws.Context, input *Desc return w.WaitWithContext(ctx) } + +// WaitUntilDBSnapshotAvailable uses the Amazon RDS API operation +// DescribeDBSnapshots to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *RDS) WaitUntilDBSnapshotAvailable(input *DescribeDBSnapshotsInput) error { + return c.WaitUntilDBSnapshotAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilDBSnapshotAvailableWithContext is an extended version of WaitUntilDBSnapshotAvailable, +// with support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
+func (c *RDS) WaitUntilDBSnapshotAvailableWithContext(ctx aws.Context, input *DescribeDBSnapshotsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilDBSnapshotAvailable", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "available", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "deleted", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "deleting", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "failed", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "incompatible-restore", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "incompatible-parameters", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeDBSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilDBSnapshotDeleted uses the Amazon RDS API operation +// DescribeDBSnapshots to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *RDS) WaitUntilDBSnapshotDeleted(input *DescribeDBSnapshotsInput) error { + return c.WaitUntilDBSnapshotDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilDBSnapshotDeletedWithContext is an extended version of WaitUntilDBSnapshotDeleted, +// with support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
+func (c *RDS) WaitUntilDBSnapshotDeletedWithContext(ctx aws.Context, input *DescribeDBSnapshotsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilDBSnapshotDeleted", + MaxAttempts: 60, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "deleted", + }, + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "DBSnapshotNotFound", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "creating", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "modifying", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "rebooting", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "DBSnapshots[].Status", + Expected: "resetting-master-credentials", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeDBSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go index 90e4ba45ac7..96d8bd5268b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -357,9 +357,8 @@ func (c *SES) CreateConfigurationSetTrackingOptionsRequest(input *CreateConfigur // // By default, images and links used for tracking open and click events are // hosted on domains operated by Amazon SES. You can configure a subdomain of -// your own to handle these events by redirecting them to the Amazon SES-operated -// domain. For information about using configuration sets, see Configuring Custom -// Domains to Handle Open and Click Tracking (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) +// your own to handle these events. For information about using configuration +// sets, see Configuring Custom Domains to Handle Open and Click Tracking (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) // in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -694,6 +693,97 @@ func (c *SES) CreateReceiptRuleSetWithContext(ctx aws.Context, input *CreateRece return out, req.Send() } +const opCreateTemplate = "CreateTemplate" + +// CreateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See CreateTemplate for more information on using the CreateTemplate +// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the CreateTemplateRequest method. +// req, resp := client.CreateTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplate +func (c *SES) CreateTemplateRequest(input *CreateTemplateInput) (req *request.Request, output *CreateTemplateOutput) { + op := &request.Operation{ + Name: opCreateTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTemplateInput{} + } + + output = &CreateTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTemplate API operation for Amazon Simple Email Service. +// +// Creates an email template. Email templates enable you to send personalized +// email to one or more destinations in a single API operation. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation CreateTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAlreadyExistsException "AlreadyExists" +// Indicates that a resource could not be created because of a naming conflict. +// +// * ErrCodeInvalidTemplateException "InvalidTemplate" +// Indicates that a template could not be created because it contained invalid +// JSON. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// Indicates that a resource could not be created because of service limits. +// For a list of Amazon SES limits, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/limits.html). +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplate +func (c *SES) CreateTemplate(input *CreateTemplateInput) (*CreateTemplateOutput, error) { + req, out := c.CreateTemplateRequest(input) + return out, req.Send() +} + +// CreateTemplateWithContext is the same as CreateTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) CreateTemplateWithContext(ctx aws.Context, input *CreateTemplateInput, opts ...request.Option) (*CreateTemplateOutput, error) { + req, out := c.CreateTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...)
+ return out, req.Send() +} + const opDeleteConfigurationSet = "DeleteConfigurationSet" // DeleteConfigurationSetRequest generates a "aws/request.Request" representing the @@ -913,9 +1003,8 @@ func (c *SES) DeleteConfigurationSetTrackingOptionsRequest(input *DeleteConfigur // // By default, images and links used for tracking open and click events are // hosted on domains operated by Amazon SES. You can configure a subdomain of -// your own to handle these events by redirecting them to the Amazon SES-operated -// domain. For information about using configuration sets, see Configuring Custom -// Domains to Handle Open and Click Tracking (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) +// your own to handle these events. For information about using configuration +// sets, see Configuring Custom Domains to Handle Open and Click Tracking (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) // in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). // // Deleting this kind of association will result in emails sent using the specified @@ -1369,6 +1458,82 @@ func (c *SES) DeleteReceiptRuleSetWithContext(ctx aws.Context, input *DeleteRece return out, req.Send() } +const opDeleteTemplate = "DeleteTemplate" + +// DeleteTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See DeleteTemplate for more information on using the DeleteTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the DeleteTemplateRequest method. +// req, resp := client.DeleteTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteTemplate +func (c *SES) DeleteTemplateRequest(input *DeleteTemplateInput) (req *request.Request, output *DeleteTemplateOutput) { + op := &request.Operation{ + Name: opDeleteTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTemplateInput{} + } + + output = &DeleteTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTemplate API operation for Amazon Simple Email Service. +// +// Deletes an email template. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation DeleteTemplate for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteTemplate +func (c *SES) DeleteTemplate(input *DeleteTemplateInput) (*DeleteTemplateOutput, error) { + req, out := c.DeleteTemplateRequest(input) + return out, req.Send() +} + +// DeleteTemplateWithContext is the same as DeleteTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) DeleteTemplateWithContext(ctx aws.Context, input *DeleteTemplateInput, opts ...request.Option) (*DeleteTemplateOutput, error) { + req, out := c.DeleteTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteVerifiedEmailAddress = "DeleteVerifiedEmailAddress" // DeleteVerifiedEmailAddressRequest generates a "aws/request.Request" representing the @@ -2368,6 +2533,89 @@ func (c *SES) GetSendStatisticsWithContext(ctx aws.Context, input *GetSendStatis return out, req.Send() } +const opGetTemplate = "GetTemplate" + +// GetTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See GetTemplate for more information on using the GetTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the GetTemplateRequest method. +// req, resp := client.GetTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetTemplate +func (c *SES) GetTemplateRequest(input *GetTemplateInput) (req *request.Request, output *GetTemplateOutput) { + op := &request.Operation{ + Name: opGetTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTemplateInput{} + } + + output = &GetTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTemplate API operation for Amazon Simple Email Service. +// +// Displays the template object (which includes the Subject line, HTML part +// and text part) for the template you specify. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation GetTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account.
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetTemplate +func (c *SES) GetTemplate(input *GetTemplateInput) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + return out, req.Send() +} + +// GetTemplateWithContext is the same as GetTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See GetTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) GetTemplateWithContext(ctx aws.Context, input *GetTemplateInput, opts ...request.Option) (*GetTemplateOutput, error) { + req, out := c.GetTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListConfigurationSets = "ListConfigurationSets" // ListConfigurationSetsRequest generates a "aws/request.Request" representing the @@ -2418,8 +2666,8 @@ func (c *SES) ListConfigurationSetsRequest(input *ListConfigurationSetsInput) (r // in the Amazon SES Developer Guide. // // You can execute this operation no more than once per second. This operation -// will return up to 50 configuration sets each time it is run. If your Amazon -// SES account has more than 50 configuration sets, this operation will also +// will return up to 1,000 configuration sets each time it is run. If your Amazon +// SES account has more than 1,000 configuration sets, this operation will also // return a NextToken element. You can then execute the ListConfigurationSets // operation again, passing the NextToken parameter and the value of the NextToken // element to retrieve additional results. @@ -2831,6 +3079,82 @@ func (c *SES) ListReceiptRuleSetsWithContext(ctx aws.Context, input *ListReceipt return out, req.Send() } +const opListTemplates = "ListTemplates" + +// ListTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the ListTemplates operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTemplates for more information on using the ListTemplates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTemplatesRequest method. +// req, resp := client.ListTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListTemplates +func (c *SES) ListTemplatesRequest(input *ListTemplatesInput) (req *request.Request, output *ListTemplatesOutput) { + op := &request.Operation{ + Name: opListTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTemplatesInput{} + } + + output = &ListTemplatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTemplates API operation for Amazon Simple Email Service. 
+// +// Lists the email templates present in your Amazon SES account. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation ListTemplates for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListTemplates +func (c *SES) ListTemplates(input *ListTemplatesInput) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) + return out, req.Send() +} + +// ListTemplatesWithContext is the same as ListTemplates with the addition of +// the ability to pass a context and additional request options. +// +// See ListTemplates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) ListTemplatesWithContext(ctx aws.Context, input *ListTemplatesInput, opts ...request.Option) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListVerifiedEmailAddresses = "ListVerifiedEmailAddresses" // ListVerifiedEmailAddressesRequest generates a "aws/request.Request" representing the @@ -3177,57 +3501,60 @@ func (c *SES) SendBounceWithContext(ctx aws.Context, input *SendBounceInput, opt return out, req.Send() } -const opSendEmail = "SendEmail" +const opSendBulkTemplatedEmail = "SendBulkTemplatedEmail" -// SendEmailRequest generates a "aws/request.Request" representing the -// client's request for the SendEmail operation. The "output" return +// SendBulkTemplatedEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendBulkTemplatedEmail operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SendEmail for more information on using the SendEmail +// See SendBulkTemplatedEmail for more information on using the SendBulkTemplatedEmail // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SendEmailRequest method. -// req, resp := client.SendEmailRequest(params) +// // Example sending a request using the SendBulkTemplatedEmailRequest method. 
+// req, resp := client.SendBulkTemplatedEmailRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendEmail -func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBulkTemplatedEmail +func (c *SES) SendBulkTemplatedEmailRequest(input *SendBulkTemplatedEmailInput) (req *request.Request, output *SendBulkTemplatedEmailOutput) { op := &request.Operation{ - Name: opSendEmail, + Name: opSendBulkTemplatedEmail, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &SendEmailInput{} + input = &SendBulkTemplatedEmailInput{} } - output = &SendEmailOutput{} + output = &SendBulkTemplatedEmailOutput{} req = c.newRequest(op, input, output) return } -// SendEmail API operation for Amazon Simple Email Service. +// SendBulkTemplatedEmail API operation for Amazon Simple Email Service. // -// Composes an email message and immediately queues it for sending. In order -// to send email using the SendEmail operation, your message must meet the following -// requirements: +// Composes an email message to multiple destinations. The message body is created +// using an email template. // -// * The message must be sent from a verified email address or domain. If -// you attempt to send email using a non-verified address or domain, the -// operation will result in an "Email address not verified" error. +// In order to send email using the SendBulkTemplatedEmail operation, your call +// to the API must meet the following requirements: +// +// * The call must refer to an existing email template. You can create email +// templates using the CreateTemplate operation. +// +// * The message must be sent from a verified email address or domain. // // * If your account is still in the Amazon SES sandbox, you may only send // to verified addresses or domains, or to email addresses associated with @@ -3235,10 +3562,125 @@ func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, out // Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // -// * The total size of the message, including attachments, must be smaller -// than 10 MB. +// * The total size of the message, including attachments, must be less than +// 10 MB. // -// * The message must include at least one recipient email address. The recipient +// * Each Destination parameter must include at least one recipient email +// address. The recipient address can be a To: address, a CC: address, or +// a BCC: address. If a recipient email address is invalid (that is, it is +// not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire +// message will be rejected, even if the message contains other recipients +// that are valid. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendBulkTemplatedEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. 
Check +// the error stack for more information about what caused the error. +// +// * ErrCodeMailFromDomainNotVerifiedException "MailFromDomainNotVerifiedException" +// Indicates that the message could not be sent because Amazon SES could not +// read the MX record required to use the specified MAIL FROM domain. For information +// about editing the custom MAIL FROM domain settings for an identity, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBulkTemplatedEmail +func (c *SES) SendBulkTemplatedEmail(input *SendBulkTemplatedEmailInput) (*SendBulkTemplatedEmailOutput, error) { + req, out := c.SendBulkTemplatedEmailRequest(input) + return out, req.Send() +} + +// SendBulkTemplatedEmailWithContext is the same as SendBulkTemplatedEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendBulkTemplatedEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendBulkTemplatedEmailWithContext(ctx aws.Context, input *SendBulkTemplatedEmailInput, opts ...request.Option) (*SendBulkTemplatedEmailOutput, error) { + req, out := c.SendBulkTemplatedEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSendEmail = "SendEmail" + +// SendEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendEmail operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendEmail for more information on using the SendEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendEmailRequest method. +// req, resp := client.SendEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendEmail +func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { + op := &request.Operation{ + Name: opSendEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendEmailInput{} + } + + output = &SendEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendEmail API operation for Amazon Simple Email Service. +// +// Composes an email message and immediately queues it for sending. 
In order +// to send email using the SendEmail operation, your message must meet the following +// requirements: +// +// * The message must be sent from a verified email address or domain. If +// you attempt to send email using a non-verified address or domain, the +// operation will result in an "Email address not verified" error. +// +// * If your account is still in the Amazon SES sandbox, you may only send +// to verified addresses or domains, or to email addresses associated with +// the Amazon SES Mailbox Simulator. For more information, see Verifying +// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// in the Amazon SES Developer Guide. +// +// * The total size of the message, including attachments, must be smaller +// than 10 MB. +// +// * The message must include at least one recipient email address. The recipient // address can be a To: address, a CC: address, or a BCC: address. If a recipient // email address is invalid (that is, it is not in the format UserName@[SubDomain.]Domain.TopLevelDomain), // the entire message will be rejected, even if the message contains other @@ -3459,6 +3901,129 @@ func (c *SES) SendRawEmailWithContext(ctx aws.Context, input *SendRawEmailInput, return out, req.Send() } +const opSendTemplatedEmail = "SendTemplatedEmail" + +// SendTemplatedEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendTemplatedEmail operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendTemplatedEmail for more information on using the SendTemplatedEmail +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendTemplatedEmailRequest method. +// req, resp := client.SendTemplatedEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendTemplatedEmail +func (c *SES) SendTemplatedEmailRequest(input *SendTemplatedEmailInput) (req *request.Request, output *SendTemplatedEmailOutput) { + op := &request.Operation{ + Name: opSendTemplatedEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendTemplatedEmailInput{} + } + + output = &SendTemplatedEmailOutput{} + req = c.newRequest(op, input, output) + return +} + +// SendTemplatedEmail API operation for Amazon Simple Email Service. +// +// Composes an email message using an email template and immediately queues +// it for sending. +// +// In order to send email using the SendTemplatedEmail operation, your call +// to the API must meet the following requirements: +// +// * The call must refer to an existing email template. You can create email +// templates using the CreateTemplate operation. +// +// * The message must be sent from a verified email address or domain. +// +// * If your account is still in the Amazon SES sandbox, you may only send +// to verified addresses or domains, or to email addresses associated with +// the Amazon SES Mailbox Simulator. 
For more information, see Verifying +// Email Addresses and Domains (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) +// in the Amazon SES Developer Guide. +// +// * The total size of the message, including attachments, must be less than +// 10 MB. +// +// * Calls to the SendTemplatedEmail operation may only include one Destination +// parameter. A destination is a set of recipients who will receive the same +// version of the email. The Destination parameter can include up to 50 recipients, +// across the To:, CC: and BCC: fields. +// +// * The Destination parameter must include at least one recipient email +// address. The recipient address can be a To: address, a CC: address, or +// a BCC: address. If a recipient email address is invalid (that is, it is +// not in the format UserName@[SubDomain.]Domain.TopLevelDomain), the entire +// message will be rejected, even if the message contains other recipients +// that are valid. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation SendTemplatedEmail for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMessageRejected "MessageRejected" +// Indicates that the action failed, and the message could not be sent. Check +// the error stack for more information about what caused the error. +// +// * ErrCodeMailFromDomainNotVerifiedException "MailFromDomainNotVerifiedException" +// Indicates that the message could not be sent because Amazon SES could not +// read the MX record required to use the specified MAIL FROM domain. For information +// about editing the custom MAIL FROM domain settings for an identity, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-edit.html). +// +// * ErrCodeConfigurationSetDoesNotExistException "ConfigurationSetDoesNotExist" +// Indicates that the configuration set does not exist. +// +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendTemplatedEmail +func (c *SES) SendTemplatedEmail(input *SendTemplatedEmailInput) (*SendTemplatedEmailOutput, error) { + req, out := c.SendTemplatedEmailRequest(input) + return out, req.Send() +} + +// SendTemplatedEmailWithContext is the same as SendTemplatedEmail with the addition of +// the ability to pass a context and additional request options. +// +// See SendTemplatedEmail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) SendTemplatedEmailWithContext(ctx aws.Context, input *SendTemplatedEmailInput, opts ...request.Option) (*SendTemplatedEmailOutput, error) { + req, out := c.SendTemplatedEmailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opSetActiveReceiptRuleSet = "SetActiveReceiptRuleSet" // SetActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the @@ -4057,6 +4622,98 @@ func (c *SES) SetReceiptRulePositionWithContext(ctx aws.Context, input *SetRecei return out, req.Send() } +const opTestRenderTemplate = "TestRenderTemplate" + +// TestRenderTemplateRequest generates a "aws/request.Request" representing the +// client's request for the TestRenderTemplate operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestRenderTemplate for more information on using the TestRenderTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TestRenderTemplateRequest method. +// req, resp := client.TestRenderTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TestRenderTemplate +func (c *SES) TestRenderTemplateRequest(input *TestRenderTemplateInput) (req *request.Request, output *TestRenderTemplateOutput) { + op := &request.Operation{ + Name: opTestRenderTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestRenderTemplateInput{} + } + + output = &TestRenderTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestRenderTemplate API operation for Amazon Simple Email Service. +// +// Creates a preview of the MIME content of an email when provided with a template +// and a set of replacement data. +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation TestRenderTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// * ErrCodeInvalidRenderingParameterException "InvalidRenderingParameter" +// Indicates that one or more of the replacement values you provided is invalid. +// This error may occur when the TemplateData object contains invalid JSON. +// +// * ErrCodeMissingRenderingAttributeException "MissingRenderingAttribute" +// Indicates that one or more of the replacement values for the specified template +// was not specified. Ensure that the TemplateData object contains references +// to all of the replacement tags in the specified template. 
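+//
+// A minimal sketch of rendering a template against test data (it assumes an
+// initialized *SES client named svc; the template name and data are
+// illustrative, and TemplateData must be valid JSON whose keys match the
+// replacement tags in the template):
+//
+//    out, err := svc.TestRenderTemplate(&ses.TestRenderTemplateInput{
+//        TemplateName: aws.String("MyTemplate"),
+//        TemplateData: aws.String(`{"name":"Ana"}`),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.RenderedTemplate))
+//    }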
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TestRenderTemplate +func (c *SES) TestRenderTemplate(input *TestRenderTemplateInput) (*TestRenderTemplateOutput, error) { + req, out := c.TestRenderTemplateRequest(input) + return out, req.Send() +} + +// TestRenderTemplateWithContext is the same as TestRenderTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See TestRenderTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SES) TestRenderTemplateWithContext(ctx aws.Context, input *TestRenderTemplateInput, opts ...request.Option) (*TestRenderTemplateOutput, error) { + req, out := c.TestRenderTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateConfigurationSetEventDestination = "UpdateConfigurationSetEventDestination" // UpdateConfigurationSetEventDestinationRequest generates a "aws/request.Request" representing the @@ -4211,9 +4868,8 @@ func (c *SES) UpdateConfigurationSetTrackingOptionsRequest(input *UpdateConfigur // // By default, images and links used for tracking open and click events are // hosted on domains operated by Amazon SES. You can configure a subdomain of -// your own to handle these events by redirecting them to the Amazon SES-operated -// domain. For information about using configuration sets, see Configuring Custom -// Domains to Handle Open and Click Tracking (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) +// your own to handle these events. For information about using configuration +// sets, see Configuring Custom Domains to Handle Open and Click Tracking (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html) // in the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4368,57 +5024,145 @@ func (c *SES) UpdateReceiptRuleWithContext(ctx aws.Context, input *UpdateReceipt return out, req.Send() } -const opVerifyDomainDkim = "VerifyDomainDkim" +const opUpdateTemplate = "UpdateTemplate" -// VerifyDomainDkimRequest generates a "aws/request.Request" representing the -// client's request for the VerifyDomainDkim operation. The "output" return +// UpdateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTemplate operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See VerifyDomainDkim for more information on using the VerifyDomainDkim +// See UpdateTemplate for more information on using the UpdateTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the VerifyDomainDkimRequest method. 
-// req, resp := client.VerifyDomainDkimRequest(params) +// // Example sending a request using the UpdateTemplateRequest method. +// req, resp := client.UpdateTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyDomainDkim -func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateTemplate +func (c *SES) UpdateTemplateRequest(input *UpdateTemplateInput) (req *request.Request, output *UpdateTemplateOutput) { op := &request.Operation{ - Name: opVerifyDomainDkim, + Name: opUpdateTemplate, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &VerifyDomainDkimInput{} + input = &UpdateTemplateInput{} } - output = &VerifyDomainDkimOutput{} + output = &UpdateTemplateOutput{} req = c.newRequest(op, input, output) return } -// VerifyDomainDkim API operation for Amazon Simple Email Service. +// UpdateTemplate API operation for Amazon Simple Email Service. // -// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings -// that represent your domain's identity. Using these tokens, you will need -// to create DNS CNAME records that point to DKIM public keys hosted by Amazon -// SES. Amazon Web Services will eventually detect that you have updated your -// DNS records; this detection process may take up to 72 hours. Upon successful -// detection, Amazon SES will be able to DKIM-sign email originating from that -// domain. +// Updates an email template. Email templates enable you to send personalized +// email to one or more destinations in a single API operation. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// +// You can execute this operation no more than once per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Email Service's +// API operation UpdateTemplate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTemplateDoesNotExistException "TemplateDoesNotExist" +// Indicates that the Template object you specified does not exist in your Amazon +// SES account. +// +// * ErrCodeInvalidTemplateException "InvalidTemplate" +// Indicates that a template could not be created because it contained invalid +// JSON. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateTemplate +func (c *SES) UpdateTemplate(input *UpdateTemplateInput) (*UpdateTemplateOutput, error) { + req, out := c.UpdateTemplateRequest(input) + return out, req.Send() +} + +// UpdateTemplateWithContext is the same as UpdateTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
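+//
+// A minimal sketch of updating a template under a request timeout (it assumes
+// an initialized *SES client named svc; the timeout and template content are
+// illustrative):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    _, err := svc.UpdateTemplateWithContext(ctx, &ses.UpdateTemplateInput{
+//        Template: &ses.Template{
+//            TemplateName: aws.String("MyTemplate"),
+//            SubjectPart:  aws.String("Hello, {{name}}!"),
+//            TextPart:     aws.String("Hi {{name}}."),
+//            HtmlPart:     aws.String("<p>Hi {{name}}.</p>"),
+//        },
+//    })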
+func (c *SES) UpdateTemplateWithContext(ctx aws.Context, input *UpdateTemplateInput, opts ...request.Option) (*UpdateTemplateOutput, error) { + req, out := c.UpdateTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opVerifyDomainDkim = "VerifyDomainDkim" + +// VerifyDomainDkimRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainDkim operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See VerifyDomainDkim for more information on using the VerifyDomainDkim +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the VerifyDomainDkimRequest method. +// req, resp := client.VerifyDomainDkimRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/VerifyDomainDkim +func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { + op := &request.Operation{ + Name: opVerifyDomainDkim, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainDkimInput{} + } + + output = &VerifyDomainDkimOutput{} + req = c.newRequest(op, input, output) + return +} + +// VerifyDomainDkim API operation for Amazon Simple Email Service. +// +// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings +// that represent your domain's identity. Using these tokens, you will need +// to create DNS CNAME records that point to DKIM public keys hosted by Amazon +// SES. Amazon Web Services will eventually detect that you have updated your +// DNS records; this detection process may take up to 72 hours. Upon successful +// detection, Amazon SES will be able to DKIM-sign email originating from that +// domain. // // You can execute this operation no more than once per second. // @@ -4984,6 +5728,166 @@ func (s *BouncedRecipientInfo) SetRecipientDsnFields(v *RecipientDsnFields) *Bou return s } +// An array that contains one or more Destinations, as well as the tags and +// replacement data associated with each of those Destinations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/BulkEmailDestination +type BulkEmailDestination struct { + _ struct{} `type:"structure"` + + // Represents the destination of the message, consisting of To:, CC:, and BCC: + // fields. + // + // By default, the string must be 7-bit ASCII. If the text must contain any + // other characters, then you must use MIME encoded-word syntax (RFC 2047) instead + // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // For more information, see RFC 2047 (https://tools.ietf.org/html/rfc2047). + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using SendBulkTemplatedEmail. 
Tags correspond to characteristics + // of the email that you define, so that you can publish email sending events. + ReplacementTags []*MessageTag `type:"list"` + + // A list of replacement values to apply to the template. This parameter is + // a JSON object, typically consisting of key-value pairs in which the keys + // correspond to replacement tags in the email template. + ReplacementTemplateData *string `type:"string"` +} + +// String returns the string representation +func (s BulkEmailDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BulkEmailDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BulkEmailDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BulkEmailDestination"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.ReplacementTags != nil { + for i, v := range s.ReplacementTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplacementTags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *BulkEmailDestination) SetDestination(v *Destination) *BulkEmailDestination { + s.Destination = v + return s +} + +// SetReplacementTags sets the ReplacementTags field's value. +func (s *BulkEmailDestination) SetReplacementTags(v []*MessageTag) *BulkEmailDestination { + s.ReplacementTags = v + return s +} + +// SetReplacementTemplateData sets the ReplacementTemplateData field's value. +func (s *BulkEmailDestination) SetReplacementTemplateData(v string) *BulkEmailDestination { + s.ReplacementTemplateData = &v + return s +} + +// An object that contains the response from the SendBulkTemplatedEmail operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/BulkEmailDestinationStatus +type BulkEmailDestinationStatus struct { + _ struct{} `type:"structure"` + + // A description of an error that prevented a message being sent using the SendBulkTemplatedEmail + // operation. + Error *string `type:"string"` + + // The unique message identifier returned from the SendBulkTemplatedEmail operation. + MessageId *string `type:"string"` + + // The status of a message sent using the SendBulkTemplatedEmail operation. + // + // Possible values for this parameter include: + // + // * Success: Amazon SES accepted the message, and will attempt to deliver + // it to the recipients. + // + // * MessageRejected: The message was rejected because it contained a virus. + // + // * MailFromDomainNotVerified: The sender's email address or domain was + // not verified. + // + // * ConfigurationSetDoesNotExist: The configuration set you specified does + // not exist. + // + // * TemplateDoesNotExist: The template you specified does not exist. + // + // * AccountSuspended: Your account has been shut down because of issues + // related to your email sending practices. + // + // * AccountThrottled: The number of emails you can send has been reduced + // because your account has exceeded its allocated sending limit. + // + // * AccountDailyQuotaExceeded: You have reached or exceeded the maximum + // number of emails you can send from your account in a 24-hour period. 
+ // + // * InvalidSendingPoolName: The configuration set you specified refers to + // an IP pool that does not exist. + // + // * InvalidParameterValue: One or more of the parameters you specified when + // calling this operation was invalid. See the error message for additional + // information. + // + // * TransientFailure: Amazon SES was unable to process your request because + // of a temporary issue. + // + // * Failed: Amazon SES was unable to process your request. See the error + // message for additional information. + Status *string `type:"string" enum:"BulkEmailStatus"` +} + +// String returns the string representation +func (s BulkEmailDestinationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BulkEmailDestinationStatus) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. +func (s *BulkEmailDestinationStatus) SetError(v string) *BulkEmailDestinationStatus { + s.Error = &v + return s +} + +// SetMessageId sets the MessageId field's value. +func (s *BulkEmailDestinationStatus) SetMessageId(v string) *BulkEmailDestinationStatus { + s.MessageId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *BulkEmailDestinationStatus) SetStatus(v string) *BulkEmailDestinationStatus { + s.Status = &v + return s +} + // Represents a request to create a receipt rule set by cloning an existing // one. You use receipt rule sets to receive email with Amazon SES. For more // information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). @@ -5751,6 +6655,68 @@ func (s CreateReceiptRuleSetOutput) GoString() string { return s.String() } +// Represents a request to create an email template. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplateRequest +type CreateTemplateInput struct { + _ struct{} `type:"structure"` + + // The content of the email, composed of a subject line, an HTML part, and a + // text-only part. + // + // Template is a required field + Template *Template `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTemplateInput"} + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.Template != nil { + if err := s.Template.Validate(); err != nil { + invalidParams.AddNested("Template", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplate sets the Template field's value. 
+func (s *CreateTemplateInput) SetTemplate(v *Template) *CreateTemplateInput { + s.Template = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/CreateTemplateResponse +type CreateTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateOutput) GoString() string { + return s.String() +} + // Represents a request to delete a configuration set event destination. Configuration // set event destinations are associated with configuration sets, which enable // you to publish email sending events. For information about using configuration @@ -6265,6 +7231,62 @@ func (s DeleteReceiptRuleSetOutput) GoString() string { return s.String() } +// Represents a request to delete an email template. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteTemplateRequest +type DeleteTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the template to be deleted. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *DeleteTemplateInput) SetTemplateName(v string) *DeleteTemplateInput { + s.TemplateName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteTemplateResponse +type DeleteTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTemplateOutput) GoString() string { + return s.String() +} + // Represents a request to delete an email address from the list of email addresses // you have attempted to verify under your AWS account. // Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/DeleteVerifiedEmailAddressRequest @@ -7350,21 +8372,85 @@ func (s *GetSendStatisticsOutput) SetSendDataPoints(v []*SendDataPoint) *GetSend return s } -// Represents the DKIM attributes of a verified email address or a domain. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/IdentityDkimAttributes -type IdentityDkimAttributes struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetTemplateRequest +type GetTemplateInput struct { _ struct{} `type:"structure"` - // True if DKIM signing is enabled for email sent from the identity; false otherwise. - // The default value is true. + // The name of the template you want to retrieve. 
// - // DkimEnabled is a required field - DkimEnabled *bool `type:"boolean" required:"true"` + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} - // A set of character strings that represent the domain's identity. Using these - // tokens, you will need to create DNS CNAME records that point to DKIM public - // keys hosted by Amazon SES. Amazon Web Services will eventually detect that - // you have updated your DNS records; this detection process may take up to +// String returns the string representation +func (s GetTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTemplateInput"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateName sets the TemplateName field's value. +func (s *GetTemplateInput) SetTemplateName(v string) *GetTemplateInput { + s.TemplateName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetTemplateResponse +type GetTemplateOutput struct { + _ struct{} `type:"structure"` + + // The content of the email, composed of a subject line, an HTML part, and a + // text-only part. + Template *Template `type:"structure"` +} + +// String returns the string representation +func (s GetTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTemplateOutput) GoString() string { + return s.String() +} + +// SetTemplate sets the Template field's value. +func (s *GetTemplateOutput) SetTemplate(v *Template) *GetTemplateOutput { + s.Template = v + return s +} + +// Represents the DKIM attributes of a verified email address or a domain. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/IdentityDkimAttributes +type IdentityDkimAttributes struct { + _ struct{} `type:"structure"` + + // True if DKIM signing is enabled for email sent from the identity; false otherwise. + // The default value is true. + // + // DkimEnabled is a required field + DkimEnabled *bool `type:"boolean" required:"true"` + + // A set of character strings that represent the domain's identity. Using these + // tokens, you will need to create DNS CNAME records that point to DKIM public + // keys hosted by Amazon SES. Amazon Web Services will eventually detect that + // you have updated your DNS records; this detection process may take up to // 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign // email originating from that domain. (This only applies to domain identities, // not email address identities.) @@ -8099,6 +9185,76 @@ func (s *ListReceiptRuleSetsOutput) SetRuleSets(v []*ReceiptRuleSetMetadata) *Li return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListTemplatesRequest +type ListTemplatesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of templates to return. This value must be at least 1 + // and less than or equal to 10. If you do not specify a value, or if you specify + // a value less than 1 or greater than 10, the operation will return up to 10 + // results. 
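+	//
+	// A minimal pagination sketch over MaxItems and NextToken (it assumes an
+	// initialized *SES client named svc; the page size is illustrative, and
+	// the operation is limited to one call per second):
+	//
+	//    in := &ses.ListTemplatesInput{MaxItems: aws.Int64(10)}
+	//    for {
+	//        out, err := svc.ListTemplates(in)
+	//        if err != nil {
+	//            break
+	//        }
+	//        for _, m := range out.TemplatesMetadata {
+	//            fmt.Println(aws.StringValue(m.Name))
+	//        }
+	//        if out.NextToken == nil {
+	//            break
+	//        }
+	//        in.NextToken = out.NextToken
+	//    }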
+	MaxItems *int64 `type:"integer"`
+
+	// The token to use for pagination.
+	NextToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListTemplatesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTemplatesInput) GoString() string {
+	return s.String()
+}
+
+// SetMaxItems sets the MaxItems field's value.
+func (s *ListTemplatesInput) SetMaxItems(v int64) *ListTemplatesInput {
+	s.MaxItems = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTemplatesInput) SetNextToken(v string) *ListTemplatesInput {
+	s.NextToken = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListTemplatesResponse
+type ListTemplatesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use for pagination.
+	NextToken *string `type:"string"`
+
+	// An array that contains the name and creation timestamp of each template
+	// in your Amazon SES account.
+	TemplatesMetadata []*TemplateMetadata `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTemplatesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTemplatesOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTemplatesOutput) SetNextToken(v string) *ListTemplatesOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetTemplatesMetadata sets the TemplatesMetadata field's value.
+func (s *ListTemplatesOutput) SetTemplatesMetadata(v []*TemplateMetadata) *ListTemplatesOutput {
+	s.TemplatesMetadata = v
+	return s
+}
+
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/ListVerifiedEmailAddressesInput
 type ListVerifiedEmailAddressesInput struct {
 	_ struct{} `type:"structure"`
@@ -9523,6 +10679,242 @@ func (s *SendBounceOutput) SetMessageId(v string) *SendBounceOutput {
 	return s
 }
 
+// Represents a request to send a templated email to multiple destinations using
+// Amazon SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html).
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBulkTemplatedEmailRequest
+type SendBulkTemplatedEmailInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the configuration set to use when you send an email using SendBulkTemplatedEmail.
+	ConfigurationSetName *string `type:"string"`
+
+	// A list of tags, in the form of name/value pairs, to apply to an email that
+	// you send to a destination using SendBulkTemplatedEmail.
+	DefaultTags []*MessageTag `type:"list"`
+
+	// A list of replacement values to apply to the template when replacement data
+	// is not specified in a Destination object. These values act as a default or
+	// fallback option when no other data is available.
+	//
+	// The template data is a JSON object, typically consisting of key-value pairs
+	// in which the keys correspond to replacement tags in the email template.
+	DefaultTemplateData *string `type:"string"`
+
+	// One or more Destination objects. All of the recipients in a Destination will
+	// receive the same version of the email. You can specify up to 50 Destination
+	// objects within a Destinations array.
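+	//
+	// A minimal sketch of a bulk send to two destinations (it assumes an
+	// initialized *SES client named svc; the addresses, template name, and
+	// replacement data are illustrative):
+	//
+	//    out, err := svc.SendBulkTemplatedEmail(&ses.SendBulkTemplatedEmailInput{
+	//        Source:              aws.String("sender@example.com"),
+	//        Template:            aws.String("MyTemplate"),
+	//        DefaultTemplateData: aws.String(`{"name":"friend"}`),
+	//        Destinations: []*ses.BulkEmailDestination{
+	//            {
+	//                Destination: &ses.Destination{
+	//                    ToAddresses: []*string{aws.String("ana@example.com")},
+	//                },
+	//                ReplacementTemplateData: aws.String(`{"name":"Ana"}`),
+	//            },
+	//            {
+	//                Destination: &ses.Destination{
+	//                    ToAddresses: []*string{aws.String("ben@example.com")},
+	//                },
+	//            },
+	//        },
+	//    })
+	//    if err == nil {
+	//        for _, st := range out.Status {
+	//            fmt.Println(aws.StringValue(st.Status), aws.StringValue(st.MessageId))
+	//        }
+	//    }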
+ // + // Destinations is a required field + Destinations []*BulkEmailDestination `type:"list" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address that bounces and complaints will be forwarded to when feedback + // forwarding is enabled. If the message cannot be delivered to the recipient, + // then an error message will be returned from the recipient's ISP; this message + // will then be forwarded to the email address specified by the ReturnPath parameter. + // The ReturnPath parameter is never overwritten. This email address must be + // either individually verified with Amazon SES, or from a domain that has been + // verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // + // If you are sending on behalf of another user and have been permitted to do + // so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // + // In all cases, the email address must be 7-bit ASCII. If the text must contain + // any other characters, then you must use MIME encoded-word syntax (RFC 2047) + // instead of a literal string. MIME encoded-word syntax uses the following + // form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047 + // (https://tools.ietf.org/html/rfc2047). + // + // Source is a required field + Source *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. 
+ // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + SourceArn *string `type:"string"` + + // The template to use when sending this email. + // + // Template is a required field + Template *string `type:"string" required:"true"` + + // The ARN of the template to use when sending this email. + TemplateArn *string `type:"string"` +} + +// String returns the string representation +func (s SendBulkTemplatedEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBulkTemplatedEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendBulkTemplatedEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendBulkTemplatedEmailInput"} + if s.Destinations == nil { + invalidParams.Add(request.NewErrParamRequired("Destinations")) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.DefaultTags != nil { + for i, v := range s.DefaultTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DefaultTags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Destinations != nil { + for i, v := range s.Destinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Destinations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendBulkTemplatedEmailInput) SetConfigurationSetName(v string) *SendBulkTemplatedEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetDefaultTags sets the DefaultTags field's value. +func (s *SendBulkTemplatedEmailInput) SetDefaultTags(v []*MessageTag) *SendBulkTemplatedEmailInput { + s.DefaultTags = v + return s +} + +// SetDefaultTemplateData sets the DefaultTemplateData field's value. +func (s *SendBulkTemplatedEmailInput) SetDefaultTemplateData(v string) *SendBulkTemplatedEmailInput { + s.DefaultTemplateData = &v + return s +} + +// SetDestinations sets the Destinations field's value. +func (s *SendBulkTemplatedEmailInput) SetDestinations(v []*BulkEmailDestination) *SendBulkTemplatedEmailInput { + s.Destinations = v + return s +} + +// SetReplyToAddresses sets the ReplyToAddresses field's value. +func (s *SendBulkTemplatedEmailInput) SetReplyToAddresses(v []*string) *SendBulkTemplatedEmailInput { + s.ReplyToAddresses = v + return s +} + +// SetReturnPath sets the ReturnPath field's value. +func (s *SendBulkTemplatedEmailInput) SetReturnPath(v string) *SendBulkTemplatedEmailInput { + s.ReturnPath = &v + return s +} + +// SetReturnPathArn sets the ReturnPathArn field's value. +func (s *SendBulkTemplatedEmailInput) SetReturnPathArn(v string) *SendBulkTemplatedEmailInput { + s.ReturnPathArn = &v + return s +} + +// SetSource sets the Source field's value. +func (s *SendBulkTemplatedEmailInput) SetSource(v string) *SendBulkTemplatedEmailInput { + s.Source = &v + return s +} + +// SetSourceArn sets the SourceArn field's value. 
+func (s *SendBulkTemplatedEmailInput) SetSourceArn(v string) *SendBulkTemplatedEmailInput {
+ s.SourceArn = &v
+ return s
+}
+
+// SetTemplate sets the Template field's value.
+func (s *SendBulkTemplatedEmailInput) SetTemplate(v string) *SendBulkTemplatedEmailInput {
+ s.Template = &v
+ return s
+}
+
+// SetTemplateArn sets the TemplateArn field's value.
+func (s *SendBulkTemplatedEmailInput) SetTemplateArn(v string) *SendBulkTemplatedEmailInput {
+ s.TemplateArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendBulkTemplatedEmailResponse
+type SendBulkTemplatedEmailOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the messages, one BulkEmailDestinationStatus entry per Destination,
+ // returned from the SendBulkTemplatedEmail action.
+ //
+ // Status is a required field
+ Status []*BulkEmailDestinationStatus `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s SendBulkTemplatedEmailOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendBulkTemplatedEmailOutput) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *SendBulkTemplatedEmailOutput) SetStatus(v []*BulkEmailDestinationStatus) *SendBulkTemplatedEmailOutput {
+ s.Status = v
+ return s
+}
+
 // Represents sending statistics data. Each SendDataPoint contains statistics
 // for a 15-minute period of sending activity.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendDataPoint
@@ -9856,20 +11248,217 @@ type SendRawEmailInput struct {
 // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html).
 ReturnPathArn *string `type:"string"`
 
- // The identity's email address. If you do not provide a value for this parameter,
- // you must specify a "From" address in the raw text of the message. (You can
- // also specify both.)
+ // The identity's email address. If you do not provide a value for this parameter,
+ // you must specify a "From" address in the raw text of the message. (You can
+ // also specify both.)
+ //
+ // By default, the string must be 7-bit ASCII. If the text must contain any
+ // other characters, then you must use MIME encoded-word syntax (RFC 2047) instead
+ // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=.
+ // For more information, see RFC 2047 (https://tools.ietf.org/html/rfc2047).
+ //
+ // If you specify the Source parameter and have feedback forwarding enabled,
+ // then bounces and complaints will be sent to this email address. This takes
+ // precedence over any Return-Path header that you might include in the raw
+ // text of the message.
+ Source *string `type:"string"`
+
+ // This parameter is used only for sending authorization. It is the ARN of the
+ // identity that is associated with the sending authorization policy that permits
+ // you to send for the email address specified in the Source parameter.
+ //
+ // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com)
+ // attaches a policy to it that authorizes you to send from user@example.com,
+ // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com,
+ // and the Source to be user@example.com.
+ // + // Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN + // in the raw message of the email. If you use both the SourceArn parameter + // and the corresponding X-header, Amazon SES uses the value of the SourceArn + // parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + SourceArn *string `type:"string"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using SendRawEmail. Tags correspond to characteristics of the email + // that you define, so that you can publish email sending events. + Tags []*MessageTag `type:"list"` +} + +// String returns the string representation +func (s SendRawEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendRawEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendRawEmailInput"} + if s.RawMessage == nil { + invalidParams.Add(request.NewErrParamRequired("RawMessage")) + } + if s.RawMessage != nil { + if err := s.RawMessage.Validate(); err != nil { + invalidParams.AddNested("RawMessage", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfigurationSetName sets the ConfigurationSetName field's value. +func (s *SendRawEmailInput) SetConfigurationSetName(v string) *SendRawEmailInput { + s.ConfigurationSetName = &v + return s +} + +// SetDestinations sets the Destinations field's value. +func (s *SendRawEmailInput) SetDestinations(v []*string) *SendRawEmailInput { + s.Destinations = v + return s +} + +// SetFromArn sets the FromArn field's value. +func (s *SendRawEmailInput) SetFromArn(v string) *SendRawEmailInput { + s.FromArn = &v + return s +} + +// SetRawMessage sets the RawMessage field's value. +func (s *SendRawEmailInput) SetRawMessage(v *RawMessage) *SendRawEmailInput { + s.RawMessage = v + return s +} + +// SetReturnPathArn sets the ReturnPathArn field's value. +func (s *SendRawEmailInput) SetReturnPathArn(v string) *SendRawEmailInput { + s.ReturnPathArn = &v + return s +} + +// SetSource sets the Source field's value. +func (s *SendRawEmailInput) SetSource(v string) *SendRawEmailInput { + s.Source = &v + return s +} + +// SetSourceArn sets the SourceArn field's value. +func (s *SendRawEmailInput) SetSourceArn(v string) *SendRawEmailInput { + s.SourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SendRawEmailInput) SetTags(v []*MessageTag) *SendRawEmailInput { + s.Tags = v + return s +} + +// Represents a unique message ID. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendRawEmailResponse +type SendRawEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendRawEmail action. 
+ // + // MessageId is a required field + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendRawEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailOutput) GoString() string { + return s.String() +} + +// SetMessageId sets the MessageId field's value. +func (s *SendRawEmailOutput) SetMessageId(v string) *SendRawEmailOutput { + s.MessageId = &v + return s +} + +// Represents a request to send a templated email using Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-personalized-email-api.html). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendTemplatedEmailRequest +type SendTemplatedEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set to use when you send an email using SendTemplatedEmail. + ConfigurationSetName *string `type:"string"` + + // The destination for this email, composed of To:, CC:, and BCC: fields. A + // Destination can include up to 50 recipients across these three fields. + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address that bounces and complaints will be forwarded to when feedback + // forwarding is enabled. If the message cannot be delivered to the recipient, + // then an error message will be returned from the recipient's ISP; this message + // will then be forwarded to the email address specified by the ReturnPath parameter. + // The ReturnPath parameter is never overwritten. This email address must be + // either individually verified with Amazon SES, or from a domain that has been + // verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). // - // By default, the string must be 7-bit ASCII. If the text must contain any - // other characters, then you must use MIME encoded-word syntax (RFC 2047) instead - // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. 
- // For more information, see RFC 2047 (https://tools.ietf.org/html/rfc2047). + // If you are sending on behalf of another user and have been permitted to do + // so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // - // If you specify the Source parameter and have feedback forwarding enabled, - // then bounces and complaints will be sent to this email address. This takes - // precedence over any Return-Path header that you might include in the raw - // text of the message. - Source *string `type:"string"` + // In all cases, the email address must be 7-bit ASCII. If the text must contain + // any other characters, then you must use MIME encoded-word syntax (RFC 2047) + // instead of a literal string. MIME encoded-word syntax uses the following + // form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047 + // (https://tools.ietf.org/html/rfc2047). + // + // Source is a required field + Source *string `type:"string" required:"true"` // This parameter is used only for sending authorization. It is the ARN of the // identity that is associated with the sending authorization policy that permits @@ -9880,41 +11469,55 @@ type SendRawEmailInput struct { // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, // and the Source to be user@example.com. // - // Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN - // in the raw message of the email. If you use both the SourceArn parameter - // and the corresponding X-header, Amazon SES uses the value of the SourceArn - // parameter. - // - // For information about when to use this parameter, see the description of - // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). SourceArn *string `type:"string"` // A list of tags, in the form of name/value pairs, to apply to an email that - // you send using SendRawEmail. Tags correspond to characteristics of the email - // that you define, so that you can publish email sending events. + // you send using SendTemplatedEmail. Tags correspond to characteristics of + // the email that you define, so that you can publish email sending events. Tags []*MessageTag `type:"list"` + + // The template to use when sending this email. + // + // Template is a required field + Template *string `type:"string" required:"true"` + + // The ARN of the template to use when sending this email. + TemplateArn *string `type:"string"` + + // A list of replacement values to apply to the template. This parameter is + // a JSON object, typically consisting of key-value pairs in which the keys + // correspond to replacement tags in the email template. 
+ // + // TemplateData is a required field + TemplateData *string `type:"string" required:"true"` } // String returns the string representation -func (s SendRawEmailInput) String() string { +func (s SendTemplatedEmailInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SendRawEmailInput) GoString() string { +func (s SendTemplatedEmailInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SendRawEmailInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SendRawEmailInput"} - if s.RawMessage == nil { - invalidParams.Add(request.NewErrParamRequired("RawMessage")) +func (s *SendTemplatedEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendTemplatedEmailInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) } - if s.RawMessage != nil { - if err := s.RawMessage.Validate(); err != nil { - invalidParams.AddNested("RawMessage", err.(request.ErrInvalidParams)) - } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.TemplateData == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateData")) } if s.Tags != nil { for i, v := range s.Tags { @@ -9934,76 +11537,93 @@ func (s *SendRawEmailInput) Validate() error { } // SetConfigurationSetName sets the ConfigurationSetName field's value. -func (s *SendRawEmailInput) SetConfigurationSetName(v string) *SendRawEmailInput { +func (s *SendTemplatedEmailInput) SetConfigurationSetName(v string) *SendTemplatedEmailInput { s.ConfigurationSetName = &v return s } -// SetDestinations sets the Destinations field's value. -func (s *SendRawEmailInput) SetDestinations(v []*string) *SendRawEmailInput { - s.Destinations = v +// SetDestination sets the Destination field's value. +func (s *SendTemplatedEmailInput) SetDestination(v *Destination) *SendTemplatedEmailInput { + s.Destination = v return s } -// SetFromArn sets the FromArn field's value. -func (s *SendRawEmailInput) SetFromArn(v string) *SendRawEmailInput { - s.FromArn = &v +// SetReplyToAddresses sets the ReplyToAddresses field's value. +func (s *SendTemplatedEmailInput) SetReplyToAddresses(v []*string) *SendTemplatedEmailInput { + s.ReplyToAddresses = v return s } -// SetRawMessage sets the RawMessage field's value. -func (s *SendRawEmailInput) SetRawMessage(v *RawMessage) *SendRawEmailInput { - s.RawMessage = v +// SetReturnPath sets the ReturnPath field's value. +func (s *SendTemplatedEmailInput) SetReturnPath(v string) *SendTemplatedEmailInput { + s.ReturnPath = &v return s } // SetReturnPathArn sets the ReturnPathArn field's value. -func (s *SendRawEmailInput) SetReturnPathArn(v string) *SendRawEmailInput { +func (s *SendTemplatedEmailInput) SetReturnPathArn(v string) *SendTemplatedEmailInput { s.ReturnPathArn = &v return s } // SetSource sets the Source field's value. -func (s *SendRawEmailInput) SetSource(v string) *SendRawEmailInput { +func (s *SendTemplatedEmailInput) SetSource(v string) *SendTemplatedEmailInput { s.Source = &v return s } // SetSourceArn sets the SourceArn field's value. -func (s *SendRawEmailInput) SetSourceArn(v string) *SendRawEmailInput { +func (s *SendTemplatedEmailInput) SetSourceArn(v string) *SendTemplatedEmailInput { s.SourceArn = &v return s } // SetTags sets the Tags field's value. 
-func (s *SendRawEmailInput) SetTags(v []*MessageTag) *SendRawEmailInput { +func (s *SendTemplatedEmailInput) SetTags(v []*MessageTag) *SendTemplatedEmailInput { s.Tags = v return s } -// Represents a unique message ID. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendRawEmailResponse -type SendRawEmailOutput struct { +// SetTemplate sets the Template field's value. +func (s *SendTemplatedEmailInput) SetTemplate(v string) *SendTemplatedEmailInput { + s.Template = &v + return s +} + +// SetTemplateArn sets the TemplateArn field's value. +func (s *SendTemplatedEmailInput) SetTemplateArn(v string) *SendTemplatedEmailInput { + s.TemplateArn = &v + return s +} + +// SetTemplateData sets the TemplateData field's value. +func (s *SendTemplatedEmailInput) SetTemplateData(v string) *SendTemplatedEmailInput { + s.TemplateData = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/SendTemplatedEmailResponse +type SendTemplatedEmailOutput struct { _ struct{} `type:"structure"` - // The unique message identifier returned from the SendRawEmail action. + // The unique message identifier returned from the SendTemplatedEmail action. // // MessageId is a required field MessageId *string `type:"string" required:"true"` } // String returns the string representation -func (s SendRawEmailOutput) String() string { +func (s SendTemplatedEmailOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SendRawEmailOutput) GoString() string { +func (s SendTemplatedEmailOutput) GoString() string { return s.String() } // SetMessageId sets the MessageId field's value. -func (s *SendRawEmailOutput) SetMessageId(v string) *SendRawEmailOutput { +func (s *SendTemplatedEmailOutput) SetMessageId(v string) *SendTemplatedEmailOutput { s.MessageId = &v return s } @@ -10611,6 +12231,190 @@ func (s *StopAction) SetTopicArn(v string) *StopAction { return s } +// The content of the email, composed of a subject line, an HTML part, and a +// text-only part. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/Template +type Template struct { + _ struct{} `type:"structure"` + + // The HTML body of the email. + HtmlPart *string `type:"string"` + + // The subject line of the email. + SubjectPart *string `type:"string"` + + // The name of the template. You will refer to this name when you send email + // using the SendTemplatedEmail or SendBulkTemplatedEmail operations. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The email body that will be visible to recipients whose email clients do + // not display HTML. + TextPart *string `type:"string"` +} + +// String returns the string representation +func (s Template) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Template) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Template) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Template"} + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHtmlPart sets the HtmlPart field's value. +func (s *Template) SetHtmlPart(v string) *Template { + s.HtmlPart = &v + return s +} + +// SetSubjectPart sets the SubjectPart field's value. 
+func (s *Template) SetSubjectPart(v string) *Template { + s.SubjectPart = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *Template) SetTemplateName(v string) *Template { + s.TemplateName = &v + return s +} + +// SetTextPart sets the TextPart field's value. +func (s *Template) SetTextPart(v string) *Template { + s.TextPart = &v + return s +} + +// Information about an email template. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TemplateMetadata +type TemplateMetadata struct { + _ struct{} `type:"structure"` + + // The time and date the template was created. + CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the template. + Name *string `type:"string"` +} + +// String returns the string representation +func (s TemplateMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplateMetadata) GoString() string { + return s.String() +} + +// SetCreatedTimestamp sets the CreatedTimestamp field's value. +func (s *TemplateMetadata) SetCreatedTimestamp(v time.Time) *TemplateMetadata { + s.CreatedTimestamp = &v + return s +} + +// SetName sets the Name field's value. +func (s *TemplateMetadata) SetName(v string) *TemplateMetadata { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TestRenderTemplateRequest +type TestRenderTemplateInput struct { + _ struct{} `type:"structure"` + + // A list of replacement values to apply to the template. This parameter is + // a JSON object, typically consisting of key-value pairs in which the keys + // correspond to replacement tags in the email template. + // + // TemplateData is a required field + TemplateData *string `type:"string" required:"true"` + + // The name of the template that you want to render. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TestRenderTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRenderTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TestRenderTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TestRenderTemplateInput"} + if s.TemplateData == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateData")) + } + if s.TemplateName == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplateData sets the TemplateData field's value. +func (s *TestRenderTemplateInput) SetTemplateData(v string) *TestRenderTemplateInput { + s.TemplateData = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *TestRenderTemplateInput) SetTemplateName(v string) *TestRenderTemplateInput { + s.TemplateName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/TestRenderTemplateResponse +type TestRenderTemplateOutput struct { + _ struct{} `type:"structure"` + + // The complete MIME message rendered by applying the data in the TemplateData + // parameter to the template specified in the TemplateName parameter. 
+ RenderedTemplate *string `type:"string"` +} + +// String returns the string representation +func (s TestRenderTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TestRenderTemplateOutput) GoString() string { + return s.String() +} + +// SetRenderedTemplate sets the RenderedTemplate field's value. +func (s *TestRenderTemplateOutput) SetRenderedTemplate(v string) *TestRenderTemplateOutput { + s.RenderedTemplate = &v + return s +} + // A domain that is used to redirect email recipients to an Amazon SES-operated // domain. This domain captures open and click events generated by Amazon SES // emails. @@ -10876,6 +12680,66 @@ func (s UpdateReceiptRuleOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateTemplateRequest +type UpdateTemplateInput struct { + _ struct{} `type:"structure"` + + // The content of the email, composed of a subject line, an HTML part, and a + // text-only part. + // + // Template is a required field + Template *Template `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTemplateInput"} + if s.Template == nil { + invalidParams.Add(request.NewErrParamRequired("Template")) + } + if s.Template != nil { + if err := s.Template.Validate(); err != nil { + invalidParams.AddNested("Template", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTemplate sets the Template field's value. +func (s *UpdateTemplateInput) SetTemplate(v *Template) *UpdateTemplateInput { + s.Template = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/UpdateTemplateResponse +type UpdateTemplateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTemplateOutput) GoString() string { + return s.String() +} + // Represents a request to generate the CNAME records needed to set up Easy // DKIM with Amazon SES. For more information about setting up Easy DKIM, see // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). 
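The hunks above complete the SES template lifecycle added in this vendored SDK: a Template carries the subject, HTML, and text parts, TestRenderTemplate renders it locally against sample TemplateData, and SendTemplatedEmail delivers it. A minimal usage sketch of these generated types follows; the template name, addresses, and JSON payload are illustrative, and it assumes credentials and region come from the usual session defaults:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	// Region and credentials are assumed to come from the environment.
	svc := ses.New(session.Must(session.NewSession()))

	// Update an existing template; Template.Validate only requires TemplateName.
	// "WelcomeTemplate" and the addresses below are illustrative values.
	tmpl := &ses.Template{
		TemplateName: aws.String("WelcomeTemplate"),
		SubjectPart:  aws.String("Hello, {{name}}"),
		HtmlPart:     aws.String("<p>Hello <b>{{name}}</b></p>"),
		TextPart:     aws.String("Hello {{name}}"),
	}
	if _, err := svc.UpdateTemplate(&ses.UpdateTemplateInput{Template: tmpl}); err != nil {
		log.Fatal(err) // e.g. TemplateDoesNotExist or InvalidTemplate
	}

	// Render locally before sending; TemplateData must be valid JSON,
	// otherwise SES returns InvalidRenderingParameter.
	rendered, err := svc.TestRenderTemplate(&ses.TestRenderTemplateInput{
		TemplateName: aws.String("WelcomeTemplate"),
		TemplateData: aws.String(`{"name":"Alice"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(rendered.RenderedTemplate))

	// Send the templated email; Destination, Source, Template, and
	// TemplateData are the fields required by Validate.
	out, err := svc.SendTemplatedEmail(&ses.SendTemplatedEmailInput{
		Destination:  &ses.Destination{ToAddresses: []*string{aws.String("alice@example.com")}},
		Source:       aws.String("sender@example.com"), // must be an SES-verified identity
		Template:     aws.String("WelcomeTemplate"),
		TemplateData: aws.String(`{"name":"Alice"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.MessageId))
}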
@@ -11239,6 +13103,44 @@ const ( BounceTypeTemporaryFailure = "TemporaryFailure" ) +const ( + // BulkEmailStatusSuccess is a BulkEmailStatus enum value + BulkEmailStatusSuccess = "Success" + + // BulkEmailStatusMessageRejected is a BulkEmailStatus enum value + BulkEmailStatusMessageRejected = "MessageRejected" + + // BulkEmailStatusMailFromDomainNotVerified is a BulkEmailStatus enum value + BulkEmailStatusMailFromDomainNotVerified = "MailFromDomainNotVerified" + + // BulkEmailStatusConfigurationSetDoesNotExist is a BulkEmailStatus enum value + BulkEmailStatusConfigurationSetDoesNotExist = "ConfigurationSetDoesNotExist" + + // BulkEmailStatusTemplateDoesNotExist is a BulkEmailStatus enum value + BulkEmailStatusTemplateDoesNotExist = "TemplateDoesNotExist" + + // BulkEmailStatusAccountSuspended is a BulkEmailStatus enum value + BulkEmailStatusAccountSuspended = "AccountSuspended" + + // BulkEmailStatusAccountThrottled is a BulkEmailStatus enum value + BulkEmailStatusAccountThrottled = "AccountThrottled" + + // BulkEmailStatusAccountDailyQuotaExceeded is a BulkEmailStatus enum value + BulkEmailStatusAccountDailyQuotaExceeded = "AccountDailyQuotaExceeded" + + // BulkEmailStatusInvalidSendingPoolName is a BulkEmailStatus enum value + BulkEmailStatusInvalidSendingPoolName = "InvalidSendingPoolName" + + // BulkEmailStatusInvalidParameterValue is a BulkEmailStatus enum value + BulkEmailStatusInvalidParameterValue = "InvalidParameterValue" + + // BulkEmailStatusTransientFailure is a BulkEmailStatus enum value + BulkEmailStatusTransientFailure = "TransientFailure" + + // BulkEmailStatusFailed is a BulkEmailStatus enum value + BulkEmailStatusFailed = "Failed" +) + const ( // ConfigurationSetAttributeEventDestinations is a ConfigurationSetAttribute enum value ConfigurationSetAttributeEventDestinations = "eventDestinations" @@ -11310,6 +13212,9 @@ const ( // EventTypeClick is a EventType enum value EventTypeClick = "click" + + // EventTypeRenderingFailure is a EventType enum value + EventTypeRenderingFailure = "renderingFailure" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go index 3cdc428bd42..a30d70f5105 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/errors.go @@ -79,6 +79,13 @@ const ( // more information about what caused the error. ErrCodeInvalidPolicyException = "InvalidPolicy" + // ErrCodeInvalidRenderingParameterException for service response error code + // "InvalidRenderingParameter". + // + // Indicates that one or more of the replacement values you provided is invalid. + // This error may occur when the TemplateData object contains invalid JSON. + ErrCodeInvalidRenderingParameterException = "InvalidRenderingParameter" + // ErrCodeInvalidS3ConfigurationException for service response error code // "InvalidS3Configuration". // @@ -103,6 +110,13 @@ const ( // about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). ErrCodeInvalidSnsTopicException = "InvalidSnsTopic" + // ErrCodeInvalidTemplateException for service response error code + // "InvalidTemplate". + // + // Indicates that a template could not be created because it contained invalid + // JSON. + ErrCodeInvalidTemplateException = "InvalidTemplate" + // ErrCodeInvalidTrackingOptionsException for service response error code // "InvalidTrackingOptions". 
 //
@@ -137,6 +151,14 @@
 // the error stack for more information about what caused the error.
 ErrCodeMessageRejected = "MessageRejected"
 
+ // ErrCodeMissingRenderingAttributeException for service response error code
+ // "MissingRenderingAttribute".
+ //
+ // Indicates that one or more of the replacement values for the specified template
+ // was not specified. Ensure that the TemplateData object contains references
+ // to all of the replacement tags in the specified template.
+ ErrCodeMissingRenderingAttributeException = "MissingRenderingAttribute"
+
 // ErrCodeRuleDoesNotExistException for service response error code
 // "RuleDoesNotExist".
 //
@@ -149,6 +171,13 @@
 // Indicates that the provided receipt rule set does not exist.
 ErrCodeRuleSetDoesNotExistException = "RuleSetDoesNotExist"
 
+ // ErrCodeTemplateDoesNotExistException for service response error code
+ // "TemplateDoesNotExist".
+ //
+ // Indicates that the Template object you specified does not exist in your Amazon
+ // SES account.
+ ErrCodeTemplateDoesNotExistException = "TemplateDoesNotExist"
+
 // ErrCodeTrackingOptionsAlreadyExistsException for service response error code
 // "TrackingOptionsAlreadyExistsException".
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/api.go b/vendor/github.com/aws/aws-sdk-go/service/shield/api.go
new file mode 100644
index 00000000000..7446db223d7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/shield/api.go
@@ -0,0 +1,1829 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package shield
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opCreateProtection = "CreateProtection"
+
+// CreateProtectionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateProtection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateProtection for more information on using the CreateProtection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CreateProtectionRequest method.
+// req, resp := client.CreateProtectionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtection
+func (c *Shield) CreateProtectionRequest(input *CreateProtectionInput) (req *request.Request, output *CreateProtectionOutput) {
+ op := &request.Operation{
+ Name: opCreateProtection,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateProtectionInput{}
+ }
+
+ output = &CreateProtectionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateProtection API operation for AWS Shield.
+//
+// Enables AWS Shield Advanced for a specific AWS resource. The resource can
+// be an Amazon CloudFront distribution, Elastic Load Balancing load balancer,
+// or an Amazon Route 53 hosted zone.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation CreateProtection for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeInvalidResourceException "InvalidResourceException"
+// Exception that indicates that the resource is invalid. You might not have
+// access to the resource, or the resource might not exist.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// Exception that indicates that the operation would not cause any change to
+// occur.
+//
+// * ErrCodeLimitsExceededException "LimitsExceededException"
+// Exception that indicates that the operation would exceed a limit.
+//
+// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
+// Exception indicating the specified resource already exists.
+//
+// * ErrCodeOptimisticLockException "OptimisticLockException"
+// Exception that indicates that the protection state has been modified by another
+// client. You can retry the request.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// Exception indicating the specified resource does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtection
+func (c *Shield) CreateProtection(input *CreateProtectionInput) (*CreateProtectionOutput, error) {
+ req, out := c.CreateProtectionRequest(input)
+ return out, req.Send()
+}
+
+// CreateProtectionWithContext is the same as CreateProtection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateProtection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) CreateProtectionWithContext(ctx aws.Context, input *CreateProtectionInput, opts ...request.Option) (*CreateProtectionOutput, error) {
+ req, out := c.CreateProtectionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateSubscription = "CreateSubscription"
+
+// CreateSubscriptionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSubscription operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateSubscription for more information on using the CreateSubscription
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CreateSubscriptionRequest method.
+// req, resp := client.CreateSubscriptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscription
+func (c *Shield) CreateSubscriptionRequest(input *CreateSubscriptionInput) (req *request.Request, output *CreateSubscriptionOutput) {
+ op := &request.Operation{
+ Name: opCreateSubscription,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateSubscriptionInput{}
+ }
+
+ output = &CreateSubscriptionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateSubscription API operation for AWS Shield.
+//
+// Activates AWS Shield Advanced for an account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation CreateSubscription for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
+// Exception indicating the specified resource already exists.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscription
+func (c *Shield) CreateSubscription(input *CreateSubscriptionInput) (*CreateSubscriptionOutput, error) {
+ req, out := c.CreateSubscriptionRequest(input)
+ return out, req.Send()
+}
+
+// CreateSubscriptionWithContext is the same as CreateSubscription with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateSubscription for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) CreateSubscriptionWithContext(ctx aws.Context, input *CreateSubscriptionInput, opts ...request.Option) (*CreateSubscriptionOutput, error) {
+ req, out := c.CreateSubscriptionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteProtection = "DeleteProtection"
+
+// DeleteProtectionRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteProtection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteProtection for more information on using the DeleteProtection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteProtectionRequest method.
+// req, resp := client.DeleteProtectionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtection
+func (c *Shield) DeleteProtectionRequest(input *DeleteProtectionInput) (req *request.Request, output *DeleteProtectionOutput) {
+ op := &request.Operation{
+ Name: opDeleteProtection,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteProtectionInput{}
+ }
+
+ output = &DeleteProtectionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteProtection API operation for AWS Shield.
+//
+// Deletes an AWS Shield Advanced Protection.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation DeleteProtection for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// Exception indicating the specified resource does not exist.
+//
+// * ErrCodeOptimisticLockException "OptimisticLockException"
+// Exception that indicates that the protection state has been modified by another
+// client. You can retry the request.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtection
+func (c *Shield) DeleteProtection(input *DeleteProtectionInput) (*DeleteProtectionOutput, error) {
+ req, out := c.DeleteProtectionRequest(input)
+ return out, req.Send()
+}
+
+// DeleteProtectionWithContext is the same as DeleteProtection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteProtection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) DeleteProtectionWithContext(ctx aws.Context, input *DeleteProtectionInput, opts ...request.Option) (*DeleteProtectionOutput, error) {
+ req, out := c.DeleteProtectionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteSubscription = "DeleteSubscription"
+
+// DeleteSubscriptionRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteSubscription operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteSubscription for more information on using the DeleteSubscription
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteSubscriptionRequest method.
+// req, resp := client.DeleteSubscriptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscription
+func (c *Shield) DeleteSubscriptionRequest(input *DeleteSubscriptionInput) (req *request.Request, output *DeleteSubscriptionOutput) {
+ op := &request.Operation{
+ Name: opDeleteSubscription,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteSubscriptionInput{}
+ }
+
+ output = &DeleteSubscriptionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteSubscription API operation for AWS Shield.
+//
+// Removes AWS Shield Advanced from an account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation DeleteSubscription for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeLockedSubscriptionException "LockedSubscriptionException"
+// Exception that indicates that the subscription has been modified by another
+// client. You can retry the request.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// Exception indicating the specified resource does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscription
+func (c *Shield) DeleteSubscription(input *DeleteSubscriptionInput) (*DeleteSubscriptionOutput, error) {
+ req, out := c.DeleteSubscriptionRequest(input)
+ return out, req.Send()
+}
+
+// DeleteSubscriptionWithContext is the same as DeleteSubscription with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteSubscription for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) DeleteSubscriptionWithContext(ctx aws.Context, input *DeleteSubscriptionInput, opts ...request.Option) (*DeleteSubscriptionOutput, error) {
+ req, out := c.DeleteSubscriptionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeAttack = "DescribeAttack"
+
+// DescribeAttackRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeAttack operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeAttack for more information on using the DescribeAttack
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeAttackRequest method.
+// req, resp := client.DescribeAttackRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttack
+func (c *Shield) DescribeAttackRequest(input *DescribeAttackInput) (req *request.Request, output *DescribeAttackOutput) {
+ op := &request.Operation{
+ Name: opDescribeAttack,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeAttackInput{}
+ }
+
+ output = &DescribeAttackOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeAttack API operation for AWS Shield.
+//
+// Describes the details of a DDoS attack.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation DescribeAttack for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// Exception that indicates that the parameters passed to the API are invalid.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttack
+func (c *Shield) DescribeAttack(input *DescribeAttackInput) (*DescribeAttackOutput, error) {
+ req, out := c.DescribeAttackRequest(input)
+ return out, req.Send()
+}
+
+// DescribeAttackWithContext is the same as DescribeAttack with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeAttack for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) DescribeAttackWithContext(ctx aws.Context, input *DescribeAttackInput, opts ...request.Option) (*DescribeAttackOutput, error) {
+ req, out := c.DescribeAttackRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeProtection = "DescribeProtection"
+
+// DescribeProtectionRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeProtection operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeProtection for more information on using the DescribeProtection
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeProtectionRequest method.
+// req, resp := client.DescribeProtectionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtection
+func (c *Shield) DescribeProtectionRequest(input *DescribeProtectionInput) (req *request.Request, output *DescribeProtectionOutput) {
+ op := &request.Operation{
+ Name: opDescribeProtection,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeProtectionInput{}
+ }
+
+ output = &DescribeProtectionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeProtection API operation for AWS Shield.
+//
+// Lists the details of a Protection object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation DescribeProtection for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// Exception indicating the specified resource does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtection
+func (c *Shield) DescribeProtection(input *DescribeProtectionInput) (*DescribeProtectionOutput, error) {
+ req, out := c.DescribeProtectionRequest(input)
+ return out, req.Send()
+}
+
+// DescribeProtectionWithContext is the same as DescribeProtection with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeProtection for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) DescribeProtectionWithContext(ctx aws.Context, input *DescribeProtectionInput, opts ...request.Option) (*DescribeProtectionOutput, error) {
+ req, out := c.DescribeProtectionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeSubscription = "DescribeSubscription"
+
+// DescribeSubscriptionRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeSubscription operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DescribeSubscription for more information on using the DescribeSubscription
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeSubscriptionRequest method.
+// req, resp := client.DescribeSubscriptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscription
+func (c *Shield) DescribeSubscriptionRequest(input *DescribeSubscriptionInput) (req *request.Request, output *DescribeSubscriptionOutput) {
+ op := &request.Operation{
+ Name: opDescribeSubscription,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeSubscriptionInput{}
+ }
+
+ output = &DescribeSubscriptionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeSubscription API operation for AWS Shield.
+//
+// Provides details about the AWS Shield Advanced subscription for an account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation DescribeSubscription for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// Exception indicating the specified resource does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscription
+func (c *Shield) DescribeSubscription(input *DescribeSubscriptionInput) (*DescribeSubscriptionOutput, error) {
+ req, out := c.DescribeSubscriptionRequest(input)
+ return out, req.Send()
+}
+
+// DescribeSubscriptionWithContext is the same as DescribeSubscription with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeSubscription for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) DescribeSubscriptionWithContext(ctx aws.Context, input *DescribeSubscriptionInput, opts ...request.Option) (*DescribeSubscriptionOutput, error) {
+ req, out := c.DescribeSubscriptionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListAttacks = "ListAttacks"
+
+// ListAttacksRequest generates a "aws/request.Request" representing the
+// client's request for the ListAttacks operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListAttacks for more information on using the ListAttacks
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListAttacksRequest method.
+// req, resp := client.ListAttacksRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacks
+func (c *Shield) ListAttacksRequest(input *ListAttacksInput) (req *request.Request, output *ListAttacksOutput) {
+ op := &request.Operation{
+ Name: opListAttacks,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListAttacksInput{}
+ }
+
+ output = &ListAttacksOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListAttacks API operation for AWS Shield.
+//
+// Returns all ongoing DDoS attacks or all DDoS attacks during a specified time
+// period.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Shield's
+// API operation ListAttacks for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// Exception that indicates that a problem occurred with the service infrastructure.
+// You can retry the request.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// Exception that indicates that the parameters passed to the API are invalid.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// Exception that indicates that the operation would not cause any change to
+// occur.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacks
+func (c *Shield) ListAttacks(input *ListAttacksInput) (*ListAttacksOutput, error) {
+ req, out := c.ListAttacksRequest(input)
+ return out, req.Send()
+}
+
+// ListAttacksWithContext is the same as ListAttacks with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListAttacks for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Shield) ListAttacksWithContext(ctx aws.Context, input *ListAttacksInput, opts ...request.Option) (*ListAttacksOutput, error) {
+ req, out := c.ListAttacksRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListProtections = "ListProtections"
+
+// ListProtectionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListProtections operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListProtections for more information on using the ListProtections
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListProtectionsRequest method.
+// req, resp := client.ListProtectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtections +func (c *Shield) ListProtectionsRequest(input *ListProtectionsInput) (req *request.Request, output *ListProtectionsOutput) { + op := &request.Operation{ + Name: opListProtections, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListProtectionsInput{} + } + + output = &ListProtectionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListProtections API operation for AWS Shield. +// +// Lists all Protection objects for the account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Shield's +// API operation ListProtections for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// Exception that indicates that a problem occurred with the service infrastructure. +// You can retry the request. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// Exception indicating the specified resource does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtections +func (c *Shield) ListProtections(input *ListProtectionsInput) (*ListProtectionsOutput, error) { + req, out := c.ListProtectionsRequest(input) + return out, req.Send() +} + +// ListProtectionsWithContext is the same as ListProtections with the addition of +// the ability to pass a context and additional request options. +// +// See ListProtections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Shield) ListProtectionsWithContext(ctx aws.Context, input *ListProtectionsInput, opts ...request.Option) (*ListProtectionsOutput, error) { + req, out := c.ListProtectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// The details of a DDoS attack. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackDetail +type AttackDetail struct { + _ struct{} `type:"structure"` + + // List of counters that describe the attack for the specified time period. + AttackCounters []*SummarizedCounter `type:"list"` + + // The unique identifier (ID) of the attack. + AttackId *string `min:"1" type:"string"` + + // The time the attack ended, in the format 2016-12-16T13:50Z. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // List of mitigation actions taken for the attack. + Mitigations []*Mitigation `type:"list"` + + // The ARN (Amazon Resource Name) of the resource that was attacked. + ResourceArn *string `min:"1" type:"string"` + + // The time the attack started, in the format 2016-12-16T13:50Z. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // If applicable, additional detail about the resource being attacked, for example, + // IP address or URL. 
+ SubResources []*SubResourceSummary `type:"list"` +} + +// String returns the string representation +func (s AttackDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttackDetail) GoString() string { + return s.String() +} + +// SetAttackCounters sets the AttackCounters field's value. +func (s *AttackDetail) SetAttackCounters(v []*SummarizedCounter) *AttackDetail { + s.AttackCounters = v + return s +} + +// SetAttackId sets the AttackId field's value. +func (s *AttackDetail) SetAttackId(v string) *AttackDetail { + s.AttackId = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *AttackDetail) SetEndTime(v time.Time) *AttackDetail { + s.EndTime = &v + return s +} + +// SetMitigations sets the Mitigations field's value. +func (s *AttackDetail) SetMitigations(v []*Mitigation) *AttackDetail { + s.Mitigations = v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *AttackDetail) SetResourceArn(v string) *AttackDetail { + s.ResourceArn = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *AttackDetail) SetStartTime(v time.Time) *AttackDetail { + s.StartTime = &v + return s +} + +// SetSubResources sets the SubResources field's value. +func (s *AttackDetail) SetSubResources(v []*SubResourceSummary) *AttackDetail { + s.SubResources = v + return s +} + +// Summarizes all DDoS attacks for a specified time period. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackSummary +type AttackSummary struct { + _ struct{} `type:"structure"` + + // The unique identifier (ID) of the attack. + AttackId *string `type:"string"` + + // The list of attacks for a specified time period. + AttackVectors []*AttackVectorDescription `type:"list"` + + // The end time of the attack, in the format 2016-12-16T13:50Z. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The ARN (Amazon Resource Name) of the resource that was attacked. + ResourceArn *string `type:"string"` + + // The start time of the attack, in the format 2016-12-16T13:50Z. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` +} + +// String returns the string representation +func (s AttackSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttackSummary) GoString() string { + return s.String() +} + +// SetAttackId sets the AttackId field's value. +func (s *AttackSummary) SetAttackId(v string) *AttackSummary { + s.AttackId = &v + return s +} + +// SetAttackVectors sets the AttackVectors field's value. +func (s *AttackSummary) SetAttackVectors(v []*AttackVectorDescription) *AttackSummary { + s.AttackVectors = v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *AttackSummary) SetEndTime(v time.Time) *AttackSummary { + s.EndTime = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *AttackSummary) SetResourceArn(v string) *AttackSummary { + s.ResourceArn = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *AttackSummary) SetStartTime(v time.Time) *AttackSummary { + s.StartTime = &v + return s +} + +// Describes the attack. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AttackVectorDescription +type AttackVectorDescription struct { + _ struct{} `type:"structure"` + + // The attack type, for example, SNMP reflection or SYN flood. 
+ // + // VectorType is a required field + VectorType *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AttackVectorDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttackVectorDescription) GoString() string { + return s.String() +} + +// SetVectorType sets the VectorType field's value. +func (s *AttackVectorDescription) SetVectorType(v string) *AttackVectorDescription { + s.VectorType = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtectionRequest +type CreateProtectionInput struct { + _ struct{} `type:"structure"` + + // Friendly name for the Protection you are creating. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The ARN (Amazon Resource Name) of the resource to be protected. + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateProtectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateProtectionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateProtectionInput) SetName(v string) *CreateProtectionInput { + s.Name = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *CreateProtectionInput) SetResourceArn(v string) *CreateProtectionInput { + s.ResourceArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateProtectionResponse +type CreateProtectionOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier (ID) for the Protection object that is created. + ProtectionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateProtectionOutput) GoString() string { + return s.String() +} + +// SetProtectionId sets the ProtectionId field's value. 
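+//
+// Illustrative usage (an editor's sketch, not generated output; svc is an
+// initialized *shield.Shield client, and the name and ARN below are
+// placeholders): building a CreateProtectionInput with the fluent setters,
+// validating it client-side, and reading the resulting protection ID.
+//
+// input := (&shield.CreateProtectionInput{}).
+// SetName("my-cloudfront-protection").
+// SetResourceArn("arn:aws:cloudfront::123456789012:distribution/EXAMPLE")
+// if err := input.Validate(); err != nil {
+// return err // required or too-short fields are reported before any API call
+// }
+// out, err := svc.CreateProtection(input)
+// if err == nil {
+// fmt.Println(aws.StringValue(out.ProtectionId))
+// }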
+func (s *CreateProtectionOutput) SetProtectionId(v string) *CreateProtectionOutput { + s.ProtectionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscriptionRequest +type CreateSubscriptionInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubscriptionInput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/CreateSubscriptionResponse +type CreateSubscriptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSubscriptionOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtectionRequest +type DeleteProtectionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier (ID) for the Protection object to be deleted. + // + // ProtectionId is a required field + ProtectionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProtectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteProtectionInput"} + if s.ProtectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ProtectionId")) + } + if s.ProtectionId != nil && len(*s.ProtectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProtectionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetProtectionId sets the ProtectionId field's value. 
+func (s *DeleteProtectionInput) SetProtectionId(v string) *DeleteProtectionInput {
+ s.ProtectionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtectionResponse
+type DeleteProtectionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteProtectionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteProtectionOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscriptionRequest
+type DeleteSubscriptionInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSubscriptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSubscriptionInput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteSubscriptionResponse
+type DeleteSubscriptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteSubscriptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteSubscriptionOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttackRequest
+type DescribeAttackInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier (ID) for the attack to be described.
+ //
+ // AttackId is a required field
+ AttackId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeAttackInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAttackInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeAttackInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeAttackInput"}
+ if s.AttackId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttackId"))
+ }
+ if s.AttackId != nil && len(*s.AttackId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttackId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttackId sets the AttackId field's value.
+func (s *DescribeAttackInput) SetAttackId(v string) *DescribeAttackInput {
+ s.AttackId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeAttackResponse
+type DescribeAttackOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The attack that is described.
+ Attack *AttackDetail `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeAttackOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeAttackOutput) GoString() string {
+ return s.String()
+}
+
+// SetAttack sets the Attack field's value.
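+//
+// Illustrative usage (an editor's sketch; svc is an initialized *shield.Shield
+// client and the attack ID is a placeholder): describing a single attack and
+// reading a few of the AttackDetail fields.
+//
+// out, err := svc.DescribeAttack(&shield.DescribeAttackInput{
+// AttackId: aws.String("attack-id"),
+// })
+// if err == nil && out.Attack != nil {
+// fmt.Println(aws.StringValue(out.Attack.ResourceArn), aws.TimeValue(out.Attack.StartTime))
+// }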
+func (s *DescribeAttackOutput) SetAttack(v *AttackDetail) *DescribeAttackOutput { + s.Attack = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtectionRequest +type DescribeProtectionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier (ID) for the Protection object that is described. + // + // ProtectionId is a required field + ProtectionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeProtectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProtectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeProtectionInput"} + if s.ProtectionId == nil { + invalidParams.Add(request.NewErrParamRequired("ProtectionId")) + } + if s.ProtectionId != nil && len(*s.ProtectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProtectionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetProtectionId sets the ProtectionId field's value. +func (s *DescribeProtectionInput) SetProtectionId(v string) *DescribeProtectionInput { + s.ProtectionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeProtectionResponse +type DescribeProtectionOutput struct { + _ struct{} `type:"structure"` + + // The Protection object that is described. + Protection *Protection `type:"structure"` +} + +// String returns the string representation +func (s DescribeProtectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeProtectionOutput) GoString() string { + return s.String() +} + +// SetProtection sets the Protection field's value. +func (s *DescribeProtectionOutput) SetProtection(v *Protection) *DescribeProtectionOutput { + s.Protection = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscriptionRequest +type DescribeSubscriptionInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionInput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DescribeSubscriptionResponse +type DescribeSubscriptionOutput struct { + _ struct{} `type:"structure"` + + // The AWS Shield Advanced subscription details for an account. + Subscription *Subscription `type:"structure"` +} + +// String returns the string representation +func (s DescribeSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscriptionOutput) GoString() string { + return s.String() +} + +// SetSubscription sets the Subscription field's value. 
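+//
+// Illustrative usage (an editor's sketch; svc is an initialized *shield.Shield
+// client): DescribeSubscription takes an empty input, and the subscription's
+// time commitment is reported in seconds.
+//
+// out, err := svc.DescribeSubscription(&shield.DescribeSubscriptionInput{})
+// if err == nil && out.Subscription != nil {
+// fmt.Println(aws.Int64Value(out.Subscription.TimeCommitmentInSeconds))
+// }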
+func (s *DescribeSubscriptionOutput) SetSubscription(v *Subscription) *DescribeSubscriptionOutput {
+ s.Subscription = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacksRequest
+type ListAttacksInput struct {
+ _ struct{} `type:"structure"`
+
+ // The end of the time period for the attacks.
+ EndTime *TimeRange `type:"structure"`
+
+ // The maximum number of AttackSummary objects to be returned. If this is left
+ // blank, the first 20 results will be returned.
+ MaxResults *int64 `type:"integer"`
+
+ // The ListAttacksRequest.NextMarker value from a previous call to ListAttacksRequest.
+ // Pass null if this is the first call.
+ NextToken *string `min:"1" type:"string"`
+
+ // The ARN (Amazon Resource Name) of the resource that was attacked. If this
+ // is left blank, all applicable resources for this account will be included.
+ ResourceArns []*string `type:"list"`
+
+ // The start of the time period for the attacks.
+ StartTime *TimeRange `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListAttacksInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAttacksInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAttacksInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAttacksInput"}
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *ListAttacksInput) SetEndTime(v *TimeRange) *ListAttacksInput {
+ s.EndTime = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListAttacksInput) SetMaxResults(v int64) *ListAttacksInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAttacksInput) SetNextToken(v string) *ListAttacksInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetResourceArns sets the ResourceArns field's value.
+func (s *ListAttacksInput) SetResourceArns(v []*string) *ListAttacksInput {
+ s.ResourceArns = v
+ return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *ListAttacksInput) SetStartTime(v *TimeRange) *ListAttacksInput {
+ s.StartTime = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListAttacksResponse
+type ListAttacksOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The attack information for the specified time range.
+ AttackSummaries []*AttackSummary `type:"list"`
+
+ // The token returned by a previous call to indicate that there is more data
+ // available. If not null, more results are available. Pass this value for the
+ // NextMarker parameter in a subsequent call to ListAttacks to retrieve the
+ // next set of items.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListAttacksOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListAttacksOutput) GoString() string {
+ return s.String()
+}
+
+// SetAttackSummaries sets the AttackSummaries field's value.
+func (s *ListAttacksOutput) SetAttackSummaries(v []*AttackSummary) *ListAttacksOutput {
+ s.AttackSummaries = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
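+//
+// Illustrative usage (an editor's sketch; svc is an initialized *shield.Shield
+// client and the 24-hour window is a placeholder): paging through all attacks
+// by feeding each response's NextToken back into the request until it is nil.
+//
+// input := (&shield.ListAttacksInput{}).
+// SetStartTime((&shield.TimeRange{}).SetFromInclusive(time.Now().Add(-24 * time.Hour))).
+// SetEndTime((&shield.TimeRange{}).SetToExclusive(time.Now()))
+// for {
+// out, err := svc.ListAttacks(input)
+// if err != nil {
+// break // inspect with awserr.Error as described above
+// }
+// for _, a := range out.AttackSummaries {
+// fmt.Println(aws.StringValue(a.AttackId))
+// }
+// if out.NextToken == nil {
+// break
+// }
+// input.SetNextToken(aws.StringValue(out.NextToken))
+// }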
+func (s *ListAttacksOutput) SetNextToken(v string) *ListAttacksOutput {
+ s.NextToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtectionsRequest
+type ListProtectionsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of Protection objects to be returned. If this is left
+ // blank, the first 20 results will be returned.
+ MaxResults *int64 `type:"integer"`
+
+ // The ListProtectionsRequest.NextToken value from a previous call to ListProtections.
+ // Pass null if this is the first call.
+ NextToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListProtectionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListProtectionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListProtectionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListProtectionsInput"}
+ if s.NextToken != nil && len(*s.NextToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListProtectionsInput) SetMaxResults(v int64) *ListProtectionsInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListProtectionsInput) SetNextToken(v string) *ListProtectionsInput {
+ s.NextToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/ListProtectionsResponse
+type ListProtectionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If you specify a value for MaxResults and you have more Protections than
+ // the value of MaxResults, AWS Shield Advanced returns a NextToken value in
+ // the response that allows you to list another group of Protections. For the
+ // second and subsequent ListProtections requests, specify the value of NextToken
+ // from the previous response to get information about another batch of Protections.
+ NextToken *string `min:"1" type:"string"`
+
+ // The array of enabled Protection objects.
+ Protections []*Protection `type:"list"`
+}
+
+// String returns the string representation
+func (s ListProtectionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListProtectionsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListProtectionsOutput) SetNextToken(v string) *ListProtectionsOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetProtections sets the Protections field's value.
+func (s *ListProtectionsOutput) SetProtections(v []*Protection) *ListProtectionsOutput {
+ s.Protections = v
+ return s
+}
+
+// The mitigation applied to a DDoS attack.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Mitigation
+type Mitigation struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the mitigation taken for this attack.
+ MitigationName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Mitigation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Mitigation) GoString() string {
+ return s.String()
+}
+
+// SetMitigationName sets the MitigationName field's value.
+func (s *Mitigation) SetMitigationName(v string) *Mitigation { + s.MitigationName = &v + return s +} + +// An object that represents a resource that is under DDoS protection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Protection +type Protection struct { + _ struct{} `type:"structure"` + + // The unique identifier (ID) of the protection. + Id *string `min:"1" type:"string"` + + // The friendly name of the protection. For example, My CloudFront distributions. + Name *string `min:"1" type:"string"` + + // The ARN (Amazon Resource Name) of the AWS resource that is protected. + ResourceArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Protection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Protection) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *Protection) SetId(v string) *Protection { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *Protection) SetName(v string) *Protection { + s.Name = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *Protection) SetResourceArn(v string) *Protection { + s.ResourceArn = &v + return s +} + +// The attack information for the specified SubResource. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/SubResourceSummary +type SubResourceSummary struct { + _ struct{} `type:"structure"` + + // The list of attack types and associated counters. + AttackVectors []*SummarizedAttackVector `type:"list"` + + // The counters that describe the details of the attack. + Counters []*SummarizedCounter `type:"list"` + + // The unique identifier (ID) of the SubResource. + Id *string `type:"string"` + + // The SubResource type. + Type *string `type:"string" enum:"SubResourceType"` +} + +// String returns the string representation +func (s SubResourceSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubResourceSummary) GoString() string { + return s.String() +} + +// SetAttackVectors sets the AttackVectors field's value. +func (s *SubResourceSummary) SetAttackVectors(v []*SummarizedAttackVector) *SubResourceSummary { + s.AttackVectors = v + return s +} + +// SetCounters sets the Counters field's value. +func (s *SubResourceSummary) SetCounters(v []*SummarizedCounter) *SubResourceSummary { + s.Counters = v + return s +} + +// SetId sets the Id field's value. +func (s *SubResourceSummary) SetId(v string) *SubResourceSummary { + s.Id = &v + return s +} + +// SetType sets the Type field's value. +func (s *SubResourceSummary) SetType(v string) *SubResourceSummary { + s.Type = &v + return s +} + +// Information about the AWS Shield Advanced subscription for an account. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/Subscription +type Subscription struct { + _ struct{} `type:"structure"` + + // The start time of the subscription, in the format "2016-12-16T13:50Z". + StartTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The length, in seconds, of the AWS Shield Advanced subscription for the account. 
+ TimeCommitmentInSeconds *int64 `type:"long"` +} + +// String returns the string representation +func (s Subscription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Subscription) GoString() string { + return s.String() +} + +// SetStartTime sets the StartTime field's value. +func (s *Subscription) SetStartTime(v time.Time) *Subscription { + s.StartTime = &v + return s +} + +// SetTimeCommitmentInSeconds sets the TimeCommitmentInSeconds field's value. +func (s *Subscription) SetTimeCommitmentInSeconds(v int64) *Subscription { + s.TimeCommitmentInSeconds = &v + return s +} + +// A summary of information about the attack. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/SummarizedAttackVector +type SummarizedAttackVector struct { + _ struct{} `type:"structure"` + + // The list of counters that describe the details of the attack. + VectorCounters []*SummarizedCounter `type:"list"` + + // The attack type, for example, SNMP reflection or SYN flood. + // + // VectorType is a required field + VectorType *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SummarizedAttackVector) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SummarizedAttackVector) GoString() string { + return s.String() +} + +// SetVectorCounters sets the VectorCounters field's value. +func (s *SummarizedAttackVector) SetVectorCounters(v []*SummarizedCounter) *SummarizedAttackVector { + s.VectorCounters = v + return s +} + +// SetVectorType sets the VectorType field's value. +func (s *SummarizedAttackVector) SetVectorType(v string) *SummarizedAttackVector { + s.VectorType = &v + return s +} + +// The counter that describes a DDoS attack. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/SummarizedCounter +type SummarizedCounter struct { + _ struct{} `type:"structure"` + + // The average value of the counter for a specified time period. + Average *float64 `type:"double"` + + // The maximum value of the counter for a specified time period. + Max *float64 `type:"double"` + + // The number of counters for a specified time period. + N *int64 `type:"integer"` + + // The counter name. + Name *string `type:"string"` + + // The total of counter values for a specified time period. + Sum *float64 `type:"double"` + + // The unit of the counters. + Unit *string `type:"string"` +} + +// String returns the string representation +func (s SummarizedCounter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SummarizedCounter) GoString() string { + return s.String() +} + +// SetAverage sets the Average field's value. +func (s *SummarizedCounter) SetAverage(v float64) *SummarizedCounter { + s.Average = &v + return s +} + +// SetMax sets the Max field's value. +func (s *SummarizedCounter) SetMax(v float64) *SummarizedCounter { + s.Max = &v + return s +} + +// SetN sets the N field's value. +func (s *SummarizedCounter) SetN(v int64) *SummarizedCounter { + s.N = &v + return s +} + +// SetName sets the Name field's value. +func (s *SummarizedCounter) SetName(v string) *SummarizedCounter { + s.Name = &v + return s +} + +// SetSum sets the Sum field's value. +func (s *SummarizedCounter) SetSum(v float64) *SummarizedCounter { + s.Sum = &v + return s +} + +// SetUnit sets the Unit field's value. 
+func (s *SummarizedCounter) SetUnit(v string) *SummarizedCounter {
+ s.Unit = &v
+ return s
+}
+
+// The time range.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/TimeRange
+type TimeRange struct {
+ _ struct{} `type:"structure"`
+
+ // The start time, in the format 2016-12-16T13:50Z.
+ FromInclusive *time.Time `type:"timestamp" timestampFormat:"unix"`
+
+ // The end time, in the format 2016-12-16T15:50Z.
+ ToExclusive *time.Time `type:"timestamp" timestampFormat:"unix"`
+}
+
+// String returns the string representation
+func (s TimeRange) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimeRange) GoString() string {
+ return s.String()
+}
+
+// SetFromInclusive sets the FromInclusive field's value.
+func (s *TimeRange) SetFromInclusive(v time.Time) *TimeRange {
+ s.FromInclusive = &v
+ return s
+}
+
+// SetToExclusive sets the ToExclusive field's value.
+func (s *TimeRange) SetToExclusive(v time.Time) *TimeRange {
+ s.ToExclusive = &v
+ return s
+}
+
+const (
+ // SubResourceTypeIp is a SubResourceType enum value
+ SubResourceTypeIp = "IP"
+
+ // SubResourceTypeUrl is a SubResourceType enum value
+ SubResourceTypeUrl = "URL"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/doc.go b/vendor/github.com/aws/aws-sdk-go/service/shield/doc.go
new file mode 100644
index 00000000000..f9a6916ddd7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/shield/doc.go
@@ -0,0 +1,32 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package shield provides the client and types for making API
+// requests to AWS Shield.
+//
+// This is the AWS Shield Advanced API Reference. This guide is for developers
+// who need detailed information about the AWS Shield Advanced API actions,
+// data types, and errors. For detailed information about AWS WAF and AWS Shield
+// Advanced features and an overview of how to use the AWS WAF and AWS Shield
+// Advanced APIs, see the AWS WAF and AWS Shield Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02 for more information on this service.
+//
+// See shield package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/shield/
+//
+// Using the Client
+//
+// To contact AWS Shield with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Shield client Shield for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/shield/#New
+package shield
diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go b/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go
new file mode 100644
index 00000000000..be2c5bf9168
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go
@@ -0,0 +1,65 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package shield
+
+const (
+
+ // ErrCodeInternalErrorException for service response error code
+ // "InternalErrorException".
+ //
+ // Exception that indicates that a problem occurred with the service infrastructure.
+ // You can retry the request.
+ ErrCodeInternalErrorException = "InternalErrorException"
+
+ // ErrCodeInvalidOperationException for service response error code
+ // "InvalidOperationException".
+ //
+ // Exception that indicates that the operation would not cause any change to
+ // occur.
+ ErrCodeInvalidOperationException = "InvalidOperationException"
+
+ // ErrCodeInvalidParameterException for service response error code
+ // "InvalidParameterException".
+ //
+ // Exception that indicates that the parameters passed to the API are invalid.
+ ErrCodeInvalidParameterException = "InvalidParameterException"
+
+ // ErrCodeInvalidResourceException for service response error code
+ // "InvalidResourceException".
+ //
+ // Exception that indicates that the resource is invalid. You might not have
+ // access to the resource, or the resource might not exist.
+ ErrCodeInvalidResourceException = "InvalidResourceException"
+
+ // ErrCodeLimitsExceededException for service response error code
+ // "LimitsExceededException".
+ //
+ // Exception that indicates that the operation would exceed a limit.
+ ErrCodeLimitsExceededException = "LimitsExceededException"
+
+ // ErrCodeLockedSubscriptionException for service response error code
+ // "LockedSubscriptionException".
+ //
+ // Exception that indicates that the subscription has been modified by another
+ // client. You can retry the request.
+ ErrCodeLockedSubscriptionException = "LockedSubscriptionException"
+
+ // ErrCodeOptimisticLockException for service response error code
+ // "OptimisticLockException".
+ //
+ // Exception that indicates that the protection state has been modified by another
+ // client. You can retry the request.
+ ErrCodeOptimisticLockException = "OptimisticLockException"
+
+ // ErrCodeResourceAlreadyExistsException for service response error code
+ // "ResourceAlreadyExistsException".
+ //
+ // Exception indicating the specified resource already exists.
+ ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // Exception indicating the specified resource does not exist.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go
new file mode 100644
index 00000000000..31680396c5a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package shield
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// Shield provides the API operation methods for making requests to
+// AWS Shield. See this package's package overview docs
+// for details on the service.
+//
+// Shield methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
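+//
+// Illustrative error handling (an editor's sketch; svc is an initialized
+// *shield.Shield client and input is a previously built request input): the
+// error codes defined in this package's errors.go can be matched with a
+// runtime type assertion on awserr.Error.
+//
+// if _, err := svc.DeleteProtection(input); err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == shield.ErrCodeOptimisticLockException {
+// // the protection state changed underneath us; safe to retry
+// }
+// }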
+type Shield struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "shield" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the Shield client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a Shield client from just a session. +// svc := shield.New(mySession) +// +// // Create a Shield client with additional configuration +// svc := shield.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *Shield { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Shield { + svc := &Shield{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-06-02", + JSONVersion: "1.1", + TargetPrefix: "AWSShield_20160616", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a Shield operation and runs any +// custom request initialization. +func (c *Shield) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go index ea87cbabb02..082f6928784 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -64,12 +64,12 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // When you create a queue, you have full control access rights for the queue. // Only you, the owner of the queue, can grant or deny permissions to the queue. // For more information about these permissions, see Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html) -// in the Amazon SQS Developer Guide. +// in the Amazon Simple Queue Service Developer Guide. // // AddPermission writes an Amazon-SQS-generated policy. If you want to write // your own policy, use SetQueueAttributes to upload your policy. For more information // about writing your own policy, see Using The Access Policy Language (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AccessPolicyLanguage.html) -// in the Amazon SQS Developer Guide. 
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // Some actions take lists of parameters. These lists are specified using the
 // param.n notation. Values of n are integers starting from 1. For example,
@@ -165,7 +165,7 @@ func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput
 // value. The maximum allowed timeout value is 12 hours. Thus, you can't extend
 // the timeout of a message in an existing queue to more than a total visibility
 // timeout of 12 hours. For more information, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // For example, you have a message with a visibility timeout of 5 minutes. After
 // 3 minutes, you call ChangeMessageVisiblity with a timeout of 10 minutes.
@@ -392,7 +392,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request,
 // new FIFO queue for your application or delete your existing standard queue
 // and recreate it as a FIFO queue. For more information, see Moving From
 // a Standard Queue to a FIFO Queue (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // * If you don't provide a value for an attribute, the queue is created
 // with the default value for the attribute.
@@ -895,7 +895,7 @@ func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request,
 // parameter to specify the account ID of the queue's owner. The queue's owner
 // must grant you permission to access the queue. For more information about
 // shared queue access, see AddPermission or see Shared Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -979,7 +979,7 @@ func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueue
 //
 // For more information about using dead-letter queues, see Using Amazon SQS
 // Dead-Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -1014,6 +1014,100 @@ func (c *SQS) ListDeadLetterSourceQueuesWithContext(ctx aws.Context, input *List
 return out, req.Send()
 }
 
+const opListQueueTags = "ListQueueTags"
+
+// ListQueueTagsRequest generates a "aws/request.Request" representing the
+// client's request for the ListQueueTags operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListQueueTags for more information on using the ListQueueTags
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListQueueTagsRequest method.
+// req, resp := client.ListQueueTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTags
+func (c *SQS) ListQueueTagsRequest(input *ListQueueTagsInput) (req *request.Request, output *ListQueueTagsOutput) {
+ op := &request.Operation{
+ Name: opListQueueTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListQueueTagsInput{}
+ }
+
+ output = &ListQueueTagsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListQueueTags API operation for Amazon Simple Queue Service.
+//
+// List all cost allocation tags added to the specified Amazon SQS queue. For
+// an overview, see Tagging Amazon SQS Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-tagging-queues.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// When you use queue tags, keep the following guidelines in mind:
+//
+// * Adding more than 50 tags to a queue isn't recommended.
+//
+// * Tags don't have any semantic meaning. Amazon SQS interprets tags as
+// character strings.
+//
+// * Tags are case-sensitive.
+//
+// * A new tag with a key identical to that of an existing tag overwrites
+// the existing tag.
+//
+// * Tagging API actions are limited to 5 TPS per AWS account. If your application
+// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical).
+//
+// For a full list of tag restrictions, see Limits Related to Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ListQueueTags for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTags
+func (c *SQS) ListQueueTags(input *ListQueueTagsInput) (*ListQueueTagsOutput, error) {
+ req, out := c.ListQueueTagsRequest(input)
+ return out, req.Send()
+}
+
+// ListQueueTagsWithContext is the same as ListQueueTags with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListQueueTags for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ListQueueTagsWithContext(ctx aws.Context, input *ListQueueTagsInput, opts ...request.Option) (*ListQueueTagsOutput, error) {
+ req, out := c.ListQueueTagsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
 const opListQueues = "ListQueues"
 
 // ListQueuesRequest generates a "aws/request.Request" representing the
@@ -1232,7 +1326,7 @@ func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Re
 // Retrieves one or more messages (up to 10), from the specified queue. Using
 // the WaitTimeSeconds parameter enables long-poll support. For more information,
 // see Amazon SQS Long Polling (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // Short poll is the default behavior where a weighted random set of machines
 // is sampled on a ReceiveMessage call. Thus, only the messages on the sampled
@@ -1259,14 +1353,14 @@ func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Re
 //
 // The receipt handle is the identifier you must provide when deleting the message.
 // For more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // You can provide the VisibilityTimeout parameter in your request. The parameter
 // is applied to the messages that Amazon SQS returns in the response. If you
 // don't include the parameter, the overall visibility timeout for the queue
 // is used for the returned messages. For more information, see Visibility Timeout
 // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
-// in the Amazon SQS Developer Guide.
+// in the Amazon Simple Queue Service Developer Guide.
 //
 // A message that isn't deleted or a message whose visibility isn't extended
 // before the visibility timeout expires counts as a failed receive. Depending
@@ -1692,6 +1786,198 @@ func (c *SQS) SetQueueAttributesWithContext(ctx aws.Context, input *SetQueueAttr
 return out, req.Send()
 }
 
+const opTagQueue = "TagQueue"
+
+// TagQueueRequest generates a "aws/request.Request" representing the
+// client's request for the TagQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See TagQueue for more information on using the TagQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the TagQueueRequest method.
+// req, resp := client.TagQueueRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueue
+func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, output *TagQueueOutput) {
+ op := &request.Operation{
+ Name: opTagQueue,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TagQueueInput{}
+ }
+
+ output = &TagQueueOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// TagQueue API operation for Amazon Simple Queue Service.
+//
+// Add cost allocation tags to the specified Amazon SQS queue. For an overview,
+// see Tagging Amazon SQS Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-tagging-queues.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// When you use queue tags, keep the following guidelines in mind:
+//
+// * Adding more than 50 tags to a queue isn't recommended.
+//
+// * Tags don't have any semantic meaning. Amazon SQS interprets tags as
+// character strings.
+//
+// * Tags are case-sensitive.
+//
+// * A new tag with a key identical to that of an existing tag overwrites
+// the existing tag.
+//
+// * Tagging API actions are limited to 5 TPS per AWS account. If your application
+// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical).
+//
+// For a full list of tag restrictions, see Limits Related to Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation TagQueue for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueue
+func (c *SQS) TagQueue(input *TagQueueInput) (*TagQueueOutput, error) {
+ req, out := c.TagQueueRequest(input)
+ return out, req.Send()
+}
+
+// TagQueueWithContext is the same as TagQueue with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagQueue for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) TagQueueWithContext(ctx aws.Context, input *TagQueueInput, opts ...request.Option) (*TagQueueOutput, error) {
+ req, out := c.TagQueueRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUntagQueue = "UntagQueue"
+
+// UntagQueueRequest generates a "aws/request.Request" representing the
+// client's request for the UntagQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See UntagQueue for more information on using the UntagQueue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagQueueRequest method. +// req, resp := client.UntagQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueue +func (c *SQS) UntagQueueRequest(input *UntagQueueInput) (req *request.Request, output *UntagQueueOutput) { + op := &request.Operation{ + Name: opUntagQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagQueueInput{} + } + + output = &UntagQueueOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagQueue API operation for Amazon Simple Queue Service. +// +// Remove cost allocation tags from the specified Amazon SQS queue. For an overview, +// see Tagging Amazon SQS Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-tagging-queues.html) +// in the Amazon Simple Queue Service Developer Guide. +// +// When you use queue tags, keep the following guidelines in mind: +// +// * Adding more than 50 tags to a queue isn't recommended. +// +// * Tags don't have any semantic meaning. Amazon SQS interprets tags as +// character strings. +// +// * Tags are case-sensitive. +// +// * A new tag with a key identical to that of an existing tag overwrites +// the existing tag. +// +// * Tagging API actions are limited to 5 TPS per AWS account. If your application +// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). +// +// For a full list of tag restrictions, see Limits Related to Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) +// in the Amazon Simple Queue Service Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Queue Service's +// API operation UntagQueue for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueue +func (c *SQS) UntagQueue(input *UntagQueueInput) (*UntagQueueOutput, error) { + req, out := c.UntagQueueRequest(input) + return out, req.Send() +} + +// UntagQueueWithContext is the same as UntagQueue with the addition of +// the ability to pass a context and additional request options. +// +// See UntagQueue for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SQS) UntagQueueWithContext(ctx aws.Context, input *UntagQueueInput, opts ...request.Option) (*UntagQueueOutput, error) { + req, out := c.UntagQueueRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/AddPermissionRequest type AddPermissionInput struct { _ struct{} `type:"structure"` @@ -1700,7 +1986,7 @@ type AddPermissionInput struct { // who is given permission. The principal must have an AWS account, but does // not need to be signed up for Amazon SQS. For information about locating the // AWS account identification, see Your AWS Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AWSCredentials.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // AWSAccountIds is a required field AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"` @@ -1723,7 +2009,7 @@ type AddPermissionInput struct { // * SendMessage // // For more information about these actions, see Understanding Permissions (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/acp-overview.html#PermissionTypes) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n // also grants permissions for the corresponding batch versions of those actions: @@ -2206,7 +2492,7 @@ type CreateQueueInput struct { // queue functionality of the source queue. For more information about the // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter // Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue // to which Amazon SQS moves messages after the value of maxReceiveCount @@ -2221,7 +2507,7 @@ type CreateQueueInput struct { // * VisibilityTimeout - The visibility timeout for the queue. Valid values: // An integer from 0 to 43,200 (12 hours). The default is 30. For more information // about the visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // @@ -2250,12 +2536,12 @@ type CreateQueueInput struct { // the MessageGroupId for your messages explicitly. // // For more information, see FIFO Queue Logic (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // * ContentBasedDeduplication - Enables content-based deduplication. Valid // values: true, false. For more information, see Exactly-Once Processing // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. 
// // Every message must have a unique MessageDeduplicationId, // @@ -2703,7 +2989,7 @@ type GetQueueAttributesInput struct { // * ApproximateNumberOfMessages - Returns the approximate number of visible // messages in a queue. For more information, see Resources Required to Process // Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-resources-required-process-messages.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // * ApproximateNumberOfMessagesDelayed - Returns the approximate number // of messages that are waiting to be added to the queue. @@ -2711,7 +2997,7 @@ type GetQueueAttributesInput struct { // * ApproximateNumberOfMessagesNotVisible - Returns the approximate number // of messages that have not timed-out and aren't deleted. For more information, // see Resources Required to Process Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-resources-required-process-messages.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // * CreatedTimestamp - Returns the time when the queue was created in seconds // (epoch time (http://en.wikipedia.org/wiki/Unix_time)). @@ -2738,7 +3024,7 @@ type GetQueueAttributesInput struct { // dead-letter queue functionality of the source queue. For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue // to which Amazon SQS moves messages after the value of maxReceiveCount @@ -2750,7 +3036,7 @@ type GetQueueAttributesInput struct { // * VisibilityTimeout - Returns the visibility timeout for the queue. For // more information about the visibility timeout, see Visibility Timeout // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // @@ -2769,7 +3055,7 @@ type GetQueueAttributesInput struct { // // * FifoQueue - Returns whether the queue is FIFO. For more information, // see FIFO Queue Logic (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // To determine whether a queue is FIFO (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. @@ -2777,7 +3063,7 @@ type GetQueueAttributesInput struct { // * ContentBasedDeduplication - Returns whether content-based deduplication // is enabled for the queue. For more information, see Exactly-Once Processing // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. 
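+	//
+	// As an illustrative sketch (not generated documentation; svc and queueURL
+	// are assumed to exist), a queue's approximate depth and redrive policy
+	// could be read with:
+	//
+	//    out, err := svc.GetQueueAttributes(&sqs.GetQueueAttributesInput{
+	//        QueueUrl: aws.String(queueURL),
+	//        AttributeNames: []*string{
+	//            aws.String("ApproximateNumberOfMessages"),
+	//            aws.String("RedrivePolicy"),
+	//        },
+	//    })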
AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // The URL of the Amazon SQS queue whose attribute information is retrieved. @@ -2900,7 +3186,7 @@ func (s *GetQueueUrlInput) SetQueueOwnerAWSAccountId(v string) *GetQueueUrlInput } // For more information, see Responses (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/UnderstandingResponses.html) -// in the Amazon SQS Developer Guide. +// in the Amazon Simple Queue Service Developer Guide. // Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueUrlResult type GetQueueUrlOutput struct { _ struct{} `type:"structure"` @@ -2994,6 +3280,69 @@ func (s *ListDeadLetterSourceQueuesOutput) SetQueueUrls(v []*string) *ListDeadLe return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTagsRequest +type ListQueueTagsInput struct { + _ struct{} `type:"structure"` + + // The URL of the queue. + // + // QueueUrl is a required field + QueueUrl *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListQueueTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueueTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListQueueTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListQueueTagsInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueueUrl sets the QueueUrl field's value. +func (s *ListQueueTagsInput) SetQueueUrl(v string) *ListQueueTagsInput { + s.QueueUrl = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTagsResult +type ListQueueTagsOutput struct { + _ struct{} `type:"structure"` + + // The list of all tags added to the specified queue. + Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"` +} + +// String returns the string representation +func (s ListQueueTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueueTagsOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListQueueTagsOutput) SetTags(v map[string]*string) *ListQueueTagsOutput { + s.Tags = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueuesRequest type ListQueuesInput struct { _ struct{} `type:"structure"` @@ -3071,7 +3420,7 @@ type Message struct { // Each message attribute consists of a Name, Type, and Value. For more information, // see Message Attribute Items and Validation (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#message-attributes-items-validation) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // A unique identifier for the message. A MessageIdis considered unique across @@ -3161,7 +3510,7 @@ type MessageAttributeValue struct { // // You can also append custom labels. 
For more information, see Message Attribute // Data Types and Validation (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#message-attributes-data-types-validation) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // DataType is a required field DataType *string `type:"string" required:"true"` @@ -3740,7 +4089,7 @@ type SendMessageBatchRequestEntry struct { // Each message attribute consists of a Name, Type, and Value. For more information, // see Message Attribute Items and Validation (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#message-attributes-items-validation) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The body of the message. @@ -3755,7 +4104,7 @@ type SendMessageBatchRequestEntry struct { // subsequent messages with the same MessageDeduplicationId are accepted successfully // but aren't delivered. For more information, see Exactly-Once Processing // (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // * Every message must have a unique MessageDeduplicationId, // @@ -3990,7 +4339,7 @@ type SendMessageInput struct { // Each message attribute consists of a Name, Type, and Value. For more information, // see Message Attribute Items and Validation (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html#message-attributes-items-validation) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The message to send. The maximum string size is 256 KB. @@ -4013,7 +4362,7 @@ type SendMessageInput struct { // MessageDeduplicationId are accepted successfully but aren't delivered during // the 5-minute deduplication interval. For more information, see Exactly-Once // Processing (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // * Every message must have a unique MessageDeduplicationId, // @@ -4181,7 +4530,7 @@ type SendMessageOutput struct { // An attribute containing the MessageId of the message sent to the queue. For // more information, see Queue and Message Identifiers (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. MessageId *string `type:"string"` // This parameter applies only to FIFO (first-in-first-out) queues. @@ -4262,7 +4611,7 @@ type SetQueueAttributesInput struct { // queue functionality of the source queue. 
For more information about the // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter // Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // deadLetterTargetArn - The Amazon Resource Name (ARN) of the dead-letter queue // to which Amazon SQS moves messages after the value of maxReceiveCount @@ -4277,7 +4626,7 @@ type SetQueueAttributesInput struct { // * VisibilityTimeout - The visibility timeout for the queue. Valid values: // an integer from 0 to 43,200 (12 hours). The default is 30. For more information // about the visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // @@ -4303,7 +4652,7 @@ type SetQueueAttributesInput struct { // // * ContentBasedDeduplication - Enables content-based deduplication. For // more information, see Exactly-Once Processing (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) - // in the Amazon SQS Developer Guide. + // in the Amazon Simple Queue Service Developer Guide. // // Every message must have a unique MessageDeduplicationId, // @@ -4407,6 +4756,142 @@ func (s SetQueueAttributesOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueueRequest +type TagQueueInput struct { + _ struct{} `type:"structure"` + + // The URL of the queue. + // + // QueueUrl is a required field + QueueUrl *string `type:"string" required:"true"` + + // The list of tags to be added to the specified queue. + // + // Tags is a required field + Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s TagQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagQueueInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueueUrl sets the QueueUrl field's value. +func (s *TagQueueInput) SetQueueUrl(v string) *TagQueueInput { + s.QueueUrl = &v + return s +} + +// SetTags sets the Tags field's value. 
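+// Because each setter returns the receiver, a TagQueueInput can be built
+// fluently (an illustrative sketch with placeholder values, not generated
+// documentation):
+//
+//    input := (&sqs.TagQueueInput{}).
+//        SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/my-queue").
+//        SetTags(map[string]*string{"Team": aws.String("platform")})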
+func (s *TagQueueInput) SetTags(v map[string]*string) *TagQueueInput { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueueOutput +type TagQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagQueueOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueueRequest +type UntagQueueInput struct { + _ struct{} `type:"structure"` + + // The URL of the queue. + // + // QueueUrl is a required field + QueueUrl *string `type:"string" required:"true"` + + // The list of tags to be removed from the specified queue. + // + // TagKeys is a required field + TagKeys []*string `locationNameList:"TagKey" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s UntagQueueInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagQueueInput"} + if s.QueueUrl == nil { + invalidParams.Add(request.NewErrParamRequired("QueueUrl")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueueUrl sets the QueueUrl field's value. +func (s *UntagQueueInput) SetQueueUrl(v string) *UntagQueueInput { + s.QueueUrl = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagQueueInput) SetTagKeys(v []*string) *UntagQueueInput { + s.TagKeys = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueueOutput +type UntagQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagQueueOutput) GoString() string { + return s.String() +} + const ( // MessageSystemAttributeNameSenderId is a MessageSystemAttributeName enum value MessageSystemAttributeNameSenderId = "SenderId" diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go index 8299fc7b45c..96bce059504 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go @@ -29,7 +29,7 @@ // // * Amazon SQS Product Page (http://aws.amazon.com/sqs/) // -// * Amazon SQS Developer Guide +// * Amazon Simple Queue Service Developer Guide // // Making API Requests (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/MakingRequestsArticle.html) // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go index 3463f57cd67..00615d7c164 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -1008,6 +1008,10 @@ func (c *SSM) DeleteActivationRequest(input *DeleteActivationInput) (req *reques // * ErrCodeInternalServerError "InternalServerError" // An error occurred on the server side. 
// +// * ErrCodeTooManyUpdates "TooManyUpdates" +// There are concurrent updates for a resource that supports one update at a +// time. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteActivation func (c *SSM) DeleteActivation(input *DeleteActivationInput) (*DeleteActivationOutput, error) { req, out := c.DeleteActivationRequest(input) @@ -5284,6 +5288,10 @@ func (c *SSM) GetParameterRequest(input *GetParameterInput) (req *request.Reques // * ErrCodeParameterNotFound "ParameterNotFound" // The parameter could not be found. Verify the name and try again. // +// * ErrCodeParameterVersionNotFound "ParameterVersionNotFound" +// The specified parameter version was not found. Verify the parameter name +// and version, and try again. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetParameter func (c *SSM) GetParameter(input *GetParameterInput) (*GetParameterOutput, error) { req, out := c.GetParameterRequest(input) @@ -7602,6 +7610,9 @@ func (c *SSM) PutParameterRequest(input *PutParameterInput) (req *request.Reques // * ErrCodeInvalidAllowedPatternException "InvalidAllowedPatternException" // The request does not meet the regular expression requirement. // +// * ErrCodeParameterMaxVersionLimitExceeded "ParameterMaxVersionLimitExceeded" +// The parameter exceeded the maximum number of allowed versions. +// // * ErrCodeParameterPatternMismatchException "ParameterPatternMismatchException" // The parameter name is not valid. // @@ -9467,6 +9478,11 @@ type AddTagsToResourceInput struct { // The resource ID you want to tag. // + // For the ManagedInstance, MaintenanceWindow, and PatchBaseline values, use + // the ID of the resource, such as mw-01234361858c9b57b for a Maintenance Window. + // + // For the Document and Parameter values, use the name of the resource. + // // ResourceId is a required field ResourceId *string `type:"string" required:"true"` @@ -22676,6 +22692,9 @@ type Parameter struct { // The parameter value. Value *string `min:"1" type:"string"` + + // The parameter version. + Version *int64 `type:"long"` } // String returns the string representation @@ -22706,6 +22725,12 @@ func (s *Parameter) SetValue(v string) *Parameter { return s } +// SetVersion sets the Version field's value. +func (s *Parameter) SetVersion(v int64) *Parameter { + s.Version = &v + return s +} + // Information about parameter usage. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ParameterHistory type ParameterHistory struct { @@ -22717,7 +22742,7 @@ type ParameterHistory struct { AllowedPattern *string `type:"string"` // Information about the parameter. - Description *string `min:"1" type:"string"` + Description *string `type:"string"` // The ID of the query key used for this parameter. KeyId *string `min:"1" type:"string"` @@ -22736,6 +22761,9 @@ type ParameterHistory struct { // The parameter value. Value *string `min:"1" type:"string"` + + // The parameter version. + Version *int64 `type:"long"` } // String returns the string representation @@ -22796,6 +22824,12 @@ func (s *ParameterHistory) SetValue(v string) *ParameterHistory { return s } +// SetVersion sets the Version field's value. +func (s *ParameterHistory) SetVersion(v int64) *ParameterHistory { + s.Version = &v + return s +} + // Metada includes information like the ARN of the last user and the date/time // the parameter was last used. 
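 //
 // As an illustrative sketch (not generated documentation; svc is an assumed
 // *ssm.SSM client), ParameterMetadata values are returned when listing
 // parameters with DescribeParameters:
 //
 //    out, err := svc.DescribeParameters(&ssm.DescribeParametersInput{})
 //    if err == nil {
 //        for _, m := range out.Parameters {
 //            fmt.Println(aws.StringValue(m.Name), aws.Int64Value(m.Version))
 //        }
 //    }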
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ParameterMetadata @@ -22808,7 +22842,7 @@ type ParameterMetadata struct { AllowedPattern *string `type:"string"` // Description of the parameter actions. - Description *string `min:"1" type:"string"` + Description *string `type:"string"` // The ID of the query key used for this parameter. KeyId *string `min:"1" type:"string"` @@ -22825,6 +22859,9 @@ type ParameterMetadata struct { // The type of parameter. Valid parameter types include the following: String, // String list, Secure string. Type *string `type:"string" enum:"ParameterType"` + + // The parameter version. + Version *int64 `type:"long"` } // String returns the string representation @@ -22879,6 +22916,12 @@ func (s *ParameterMetadata) SetType(v string) *ParameterMetadata { return s } +// SetVersion sets the Version field's value. +func (s *ParameterMetadata) SetVersion(v int64) *ParameterMetadata { + s.Version = &v + return s +} + // One or more filters. Use a filter to return a more specific list of results. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/ParameterStringFilter type ParameterStringFilter struct { @@ -23885,7 +23928,7 @@ type PutParameterInput struct { AllowedPattern *string `type:"string"` // Information about the parameter that you want to add to the system - Description *string `min:"1" type:"string"` + Description *string `type:"string"` // The KMS Key ID that you want to use to encrypt a parameter when you choose // the SecureString data type. If you don't specify a key ID, the system uses @@ -23924,9 +23967,6 @@ func (s PutParameterInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutParameterInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutParameterInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } if s.KeyId != nil && len(*s.KeyId) < 1 { invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) } @@ -23997,6 +24037,14 @@ func (s *PutParameterInput) SetValue(v string) *PutParameterInput { // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutParameterResult type PutParameterOutput struct { _ struct{} `type:"structure"` + + // The new version number of a parameter. If you edit a parameter value, Parameter + // Store automatically creates a new version and assigns this new version a + // unique ID. You can reference a parameter version ID in API actions or in + // Systems Manager documents (SSM documents). By default, if you don't specify + // a specific version, the system returns the latest parameter value when a + // parameter is called. + Version *int64 `type:"long"` } // String returns the string representation @@ -24009,6 +24057,12 @@ func (s PutParameterOutput) GoString() string { return s.String() } +// SetVersion sets the Version field's value. 
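+// As a hedged sketch (not generated documentation; the parameter name and
+// value are placeholders), the new Version can be read from the PutParameter
+// response like this:
+//
+//    out, err := svc.PutParameter(&ssm.PutParameterInput{
+//        Name:      aws.String("/app/config"),
+//        Type:      aws.String("String"),
+//        Value:     aws.String("v2"),
+//        Overwrite: aws.Bool(true),
+//    })
+//    if err == nil {
+//        fmt.Println("new version:", aws.Int64Value(out.Version))
+//    }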
+func (s *PutParameterOutput) SetVersion(v int64) *PutParameterOutput {
+	s.Version = &v
+	return s
+}
+
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/RegisterDefaultPatchBaselineRequest
 type RegisterDefaultPatchBaselineInput struct {
 	_ struct{} `type:"structure"`
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go
index 556e114029a..38c5a1c6ac0 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go
@@ -453,6 +453,12 @@ const (
 	// or more parameters and try again.
 	ErrCodeParameterLimitExceeded = "ParameterLimitExceeded"

+	// ErrCodeParameterMaxVersionLimitExceeded for service response error code
+	// "ParameterMaxVersionLimitExceeded".
+	//
+	// The parameter exceeded the maximum number of allowed versions.
+	ErrCodeParameterMaxVersionLimitExceeded = "ParameterMaxVersionLimitExceeded"
+
 	// ErrCodeParameterNotFound for service response error code
 	// "ParameterNotFound".
 	//
@@ -465,6 +471,13 @@ const (
 	// The parameter name is not valid.
 	ErrCodeParameterPatternMismatchException = "ParameterPatternMismatchException"

+	// ErrCodeParameterVersionNotFound for service response error code
+	// "ParameterVersionNotFound".
+	//
+	// The specified parameter version was not found. Verify the parameter name
+	// and version, and try again.
+	ErrCodeParameterVersionNotFound = "ParameterVersionNotFound"
+
 	// ErrCodeResourceDataSyncAlreadyExistsException for service response error code
 	// "ResourceDataSyncAlreadyExistsException".
 	//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/api.go b/vendor/github.com/aws/aws-sdk-go/service/waf/api.go
index 47400993aaf..c6aa1c4c680 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/waf/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/waf/api.go
@@ -118,7 +118,7 @@ func (c *WAF) CreateByteMatchSetRequest(input *CreateByteMatchSetInput) (req *re
 //    BLOCK, or COUNT.
 //
 //    * You tried to update a ByteMatchSet with a FieldToMatchType other than
-//    HEADER, QUERY_STRING, or URI.
+//    HEADER, METHOD, QUERY_STRING, URI, or BODY.
 //
 //    * You tried to update a ByteMatchSet with a Field of HEADER but no value
 //    for Data.
@@ -158,6 +158,152 @@ func (c *WAF) CreateByteMatchSetWithContext(ctx aws.Context, input *CreateByteMa
 	return out, req.Send()
 }

+const opCreateGeoMatchSet = "CreateGeoMatchSet"
+
+// CreateGeoMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the CreateGeoMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateGeoMatchSet for more information on using the CreateGeoMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the CreateGeoMatchSetRequest method.
+//    req, resp := client.CreateGeoMatchSetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateGeoMatchSet
+func (c *WAF) CreateGeoMatchSetRequest(input *CreateGeoMatchSetInput) (req *request.Request, output *CreateGeoMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opCreateGeoMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateGeoMatchSetInput{}
+	}
+
+	output = &CreateGeoMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateGeoMatchSet API operation for AWS WAF.
+//
+// Creates a GeoMatchSet, which you use to specify which web requests you want
+// to allow or block based on the country that the requests originate from.
+// For example, if you're receiving a lot of requests from one or more countries
+// and you want to block the requests, you can create a GeoMatchSet that contains
+// those countries and then configure AWS WAF to block the requests.
+//
+// To create and configure a GeoMatchSet, perform the following steps:
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of a CreateGeoMatchSet request.
+//
+// Submit a CreateGeoMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateGeoMatchSet request.
+//
+// Submit an UpdateGeoMatchSet request to specify the countries that you
+// want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF's
+// API operation CreateGeoMatchSet for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeStaleDataException "StaleDataException"
+//   The operation failed because you tried to create, update, or delete an object
+//   by using a change token that has already been used.
+//
+//   * ErrCodeInternalErrorException "InternalErrorException"
+//   The operation failed because of a system problem, even though the request
+//   was valid. Retry your request.
+//
+//   * ErrCodeInvalidAccountException "InvalidAccountException"
+//   The operation failed because you tried to create, update, or delete an object
+//   by using an invalid account identifier.
+//
+//   * ErrCodeDisallowedNameException "DisallowedNameException"
+//   The name specified is invalid.
+//
+//   * ErrCodeInvalidParameterException "InvalidParameterException"
+//   The operation failed because AWS WAF didn't recognize a parameter in the
+//   request. For example:
+//
+//      * You specified an invalid parameter name.
+//
+//      * You specified an invalid value.
+//
+//      * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL)
+//      using an action other than INSERT or DELETE.
+//
+//      * You tried to create a WebACL with a DefaultActionType other than ALLOW,
+//      BLOCK, or COUNT.
+//
+//      * You tried to create a RateBasedRule with a RateKey value other than
+//      IP.
+//
+//      * You tried to update a WebACL with a WafActionType other than ALLOW,
+//      BLOCK, or COUNT.
+// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateGeoMatchSet +func (c *WAF) CreateGeoMatchSet(input *CreateGeoMatchSetInput) (*CreateGeoMatchSetOutput, error) { + req, out := c.CreateGeoMatchSetRequest(input) + return out, req.Send() +} + +// CreateGeoMatchSetWithContext is the same as CreateGeoMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGeoMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) CreateGeoMatchSetWithContext(ctx aws.Context, input *CreateGeoMatchSetInput, opts ...request.Option) (*CreateGeoMatchSetOutput, error) { + req, out := c.CreateGeoMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateIPSet = "CreateIPSet" // CreateIPSetRequest generates a "aws/request.Request" representing the @@ -269,7 +415,7 @@ func (c *WAF) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Request, // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -451,7 +597,7 @@ func (c *WAF) CreateRateBasedRuleRequest(input *CreateRateBasedRuleInput) (req * // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -487,83 +633,70 @@ func (c *WAF) CreateRateBasedRuleWithContext(ctx aws.Context, input *CreateRateB return out, req.Send() } -const opCreateRule = "CreateRule" +const opCreateRegexMatchSet = "CreateRegexMatchSet" -// CreateRuleRequest generates a "aws/request.Request" representing the -// client's request for the CreateRule operation. The "output" return +// CreateRegexMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateRegexMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See CreateRule for more information on using the CreateRule +// See CreateRegexMatchSet for more information on using the CreateRegexMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateRuleRequest method. -// req, resp := client.CreateRuleRequest(params) +// // Example sending a request using the CreateRegexMatchSetRequest method. +// req, resp := client.CreateRegexMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRule -func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, output *CreateRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexMatchSet +func (c *WAF) CreateRegexMatchSetRequest(input *CreateRegexMatchSetInput) (req *request.Request, output *CreateRegexMatchSetOutput) { op := &request.Operation{ - Name: opCreateRule, + Name: opCreateRegexMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateRuleInput{} + input = &CreateRegexMatchSetInput{} } - output = &CreateRuleOutput{} + output = &CreateRegexMatchSetOutput{} req = c.newRequest(op, input, output) return } -// CreateRule API operation for AWS WAF. -// -// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and -// other predicates that identify the requests that you want to block. If you -// add more than one predicate to a Rule, a request must match all of the specifications -// to be allowed or blocked. For example, suppose you add the following to a -// Rule: -// -// * An IPSet that matches the IP address 192.0.2.44/32 -// -// * A ByteMatchSet that matches BadBot in the User-Agent header +// CreateRegexMatchSet API operation for AWS WAF. // -// You then add the Rule to a WebACL and specify that you want to blocks requests -// that satisfy the Rule. For a request to be blocked, it must come from the -// IP address 192.0.2.44 and the User-Agent header in the request must contain -// the value BadBot. +// Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the +// part of a web request that you want AWS WAF to inspect, such as the values +// of the User-Agent header or the query string. For example, you can create +// a RegexMatchSet that contains a RegexMatchTuple that looks for any requests +// with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t. +// You can then configure AWS WAF to reject those requests. // -// To create and configure a Rule, perform the following steps: -// -// Create and update the predicates that you want to include in the Rule. For -// more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. +// To create and configure a RegexMatchSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateRule request. +// parameter of a CreateRegexMatchSet request. // -// Submit a CreateRule request. +// Submit a CreateRegexMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateRule request. -// -// Submit an UpdateRule request to specify the predicates that you want to include -// in the Rule. 
+// parameter of an UpdateRegexMatchSet request. // -// Create and update a WebACL that contains the Rule. For more information, -// see CreateWebACL. +// Submit an UpdateRegexMatchSet request to specify the part of the request +// that you want AWS WAF to inspect (for example, the header or the URI) and +// the value, using a RegexPatternSet, that you want AWS WAF to watch for. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -573,7 +706,7 @@ func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, o // the error. // // See the AWS API reference guide for AWS WAF's -// API operation CreateRule for usage and error information. +// API operation CreateRegexMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeStaleDataException "StaleDataException" @@ -587,127 +720,94 @@ func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, o // * ErrCodeDisallowedNameException "DisallowedNameException" // The name specified is invalid. // -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// // * ErrCodeLimitsExceededException "LimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of // WebACL objects that you can create for an AWS account. For more information, // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRule -func (c *WAF) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { - req, out := c.CreateRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexMatchSet +func (c *WAF) CreateRegexMatchSet(input *CreateRegexMatchSetInput) (*CreateRegexMatchSetOutput, error) { + req, out := c.CreateRegexMatchSetRequest(input) return out, req.Send() } -// CreateRuleWithContext is the same as CreateRule with the addition of +// CreateRegexMatchSetWithContext is the same as CreateRegexMatchSet with the addition of // the ability to pass a context and additional request options. // -// See CreateRule for details on how to use this API operation. +// See CreateRegexMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) CreateRuleWithContext(ctx aws.Context, input *CreateRuleInput, opts ...request.Option) (*CreateRuleOutput, error) { - req, out := c.CreateRuleRequest(input) +func (c *WAF) CreateRegexMatchSetWithContext(ctx aws.Context, input *CreateRegexMatchSetInput, opts ...request.Option) (*CreateRegexMatchSetOutput, error) { + req, out := c.CreateRegexMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateSizeConstraintSet = "CreateSizeConstraintSet" +const opCreateRegexPatternSet = "CreateRegexPatternSet" -// CreateSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateSizeConstraintSet operation. The "output" return +// CreateRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateRegexPatternSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateSizeConstraintSet for more information on using the CreateSizeConstraintSet +// See CreateRegexPatternSet for more information on using the CreateRegexPatternSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateSizeConstraintSetRequest method. -// req, resp := client.CreateSizeConstraintSetRequest(params) +// // Example sending a request using the CreateRegexPatternSetRequest method. +// req, resp := client.CreateRegexPatternSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSet -func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput) (req *request.Request, output *CreateSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexPatternSet +func (c *WAF) CreateRegexPatternSetRequest(input *CreateRegexPatternSetInput) (req *request.Request, output *CreateRegexPatternSetOutput) { op := &request.Operation{ - Name: opCreateSizeConstraintSet, + Name: opCreateRegexPatternSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateSizeConstraintSetInput{} + input = &CreateRegexPatternSetInput{} } - output = &CreateSizeConstraintSetOutput{} + output = &CreateRegexPatternSetOutput{} req = c.newRequest(op, input, output) return } -// CreateSizeConstraintSet API operation for AWS WAF. +// CreateRegexPatternSet API operation for AWS WAF. // -// Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify -// the part of a web request that you want AWS WAF to check for length, such -// as the length of the User-Agent header or the length of the query string. -// For example, you can create a SizeConstraintSet that matches any requests -// that have a query string that is longer than 100 bytes. You can then configure -// AWS WAF to reject those requests. +// Creates a RegexPatternSet. 
You then use UpdateRegexPatternSet to specify +// the regular expression (regex) pattern that you want AWS WAF to search for, +// such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests. // -// To create and configure a SizeConstraintSet, perform the following steps: +// To create and configure a RegexPatternSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateSizeConstraintSet request. +// parameter of a CreateRegexPatternSet request. // -// Submit a CreateSizeConstraintSet request. +// Submit a CreateRegexPatternSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSizeConstraintSet request. +// parameter of an UpdateRegexPatternSet request. // -// Submit an UpdateSizeConstraintSet request to specify the part of the request -// that you want AWS WAF to inspect (for example, the header or the URI) and -// the value that you want AWS WAF to watch for. +// Submit an UpdateRegexPatternSet request to specify the string that you want +// AWS WAF to watch for. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -717,7 +817,7 @@ func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput // the error. // // See the AWS API reference guide for AWS WAF's -// API operation CreateSizeConstraintSet for usage and error information. +// API operation CreateRegexPatternSet for usage and error information. // // Returned Error Codes: // * ErrCodeStaleDataException "StaleDataException" @@ -728,130 +828,114 @@ func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeInvalidAccountException "InvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. -// // * ErrCodeDisallowedNameException "DisallowedNameException" // The name specified is invalid. // -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// // * ErrCodeLimitsExceededException "LimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of // WebACL objects that you can create for an AWS account. 
For more information, // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSet -func (c *WAF) CreateSizeConstraintSet(input *CreateSizeConstraintSetInput) (*CreateSizeConstraintSetOutput, error) { - req, out := c.CreateSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexPatternSet +func (c *WAF) CreateRegexPatternSet(input *CreateRegexPatternSetInput) (*CreateRegexPatternSetOutput, error) { + req, out := c.CreateRegexPatternSetRequest(input) return out, req.Send() } -// CreateSizeConstraintSetWithContext is the same as CreateSizeConstraintSet with the addition of +// CreateRegexPatternSetWithContext is the same as CreateRegexPatternSet with the addition of // the ability to pass a context and additional request options. // -// See CreateSizeConstraintSet for details on how to use this API operation. +// See CreateRegexPatternSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) CreateSizeConstraintSetWithContext(ctx aws.Context, input *CreateSizeConstraintSetInput, opts ...request.Option) (*CreateSizeConstraintSetOutput, error) { - req, out := c.CreateSizeConstraintSetRequest(input) +func (c *WAF) CreateRegexPatternSetWithContext(ctx aws.Context, input *CreateRegexPatternSetInput, opts ...request.Option) (*CreateRegexPatternSetOutput, error) { + req, out := c.CreateRegexPatternSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" +const opCreateRule = "CreateRule" -// CreateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateSqlInjectionMatchSet operation. The "output" return +// CreateRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateSqlInjectionMatchSet for more information on using the CreateSqlInjectionMatchSet +// See CreateRule for more information on using the CreateRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateSqlInjectionMatchSetRequest method. -// req, resp := client.CreateSqlInjectionMatchSetRequest(params) +// // Example sending a request using the CreateRuleRequest method. 
+// req, resp := client.CreateRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSet -func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSetInput) (req *request.Request, output *CreateSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRule +func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, output *CreateRuleOutput) { op := &request.Operation{ - Name: opCreateSqlInjectionMatchSet, + Name: opCreateRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateSqlInjectionMatchSetInput{} + input = &CreateRuleInput{} } - output = &CreateSqlInjectionMatchSetOutput{} + output = &CreateRuleOutput{} req = c.newRequest(op, input, output) return } -// CreateSqlInjectionMatchSet API operation for AWS WAF. +// CreateRule API operation for AWS WAF. // -// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests -// that contain snippets of SQL code in a specified part of web requests. AWS -// WAF searches for character sequences that are likely to be malicious strings. +// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and +// other predicates that identify the requests that you want to block. If you +// add more than one predicate to a Rule, a request must match all of the specifications +// to be allowed or blocked. For example, suppose you add the following to a +// Rule: // -// To create and configure a SqlInjectionMatchSet, perform the following steps: +// * An IPSet that matches the IP address 192.0.2.44/32 +// +// * A ByteMatchSet that matches BadBot in the User-Agent header +// +// You then add the Rule to a WebACL and specify that you want to block requests +// that satisfy the Rule. For a request to be blocked, it must come from the +// IP address 192.0.2.44 and the User-Agent header in the request must contain +// the value BadBot. +// +// To create and configure a Rule, perform the following steps: +// +// Create and update the predicates that you want to include in the Rule. For +// more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateSqlInjectionMatchSet request. +// parameter of a CreateRule request. // -// Submit a CreateSqlInjectionMatchSet request. +// Submit a CreateRule request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSqlInjectionMatchSet request. +// parameter of an UpdateRule request. // -// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web -// requests in which you want to allow, block, or count malicious SQL code. +// Submit an UpdateRule request to specify the predicates that you want to include +// in the Rule. +// +// Create and update a WebACL that contains the Rule. For more information, +// see CreateWebACL. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -861,19 +945,19 @@ func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSe // the error.
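
A rough sketch of the CreateRule/UpdateRule steps just listed, reusing the package, imports, and newWAF client from the earlier sketch; the rule name, metric name, and the ipSetID parameter are illustrative placeholders for a predicate created elsewhere.

// createRule creates the Rule first, then inserts an existing IPSet as its
// predicate, fetching a fresh change token before each mutating call.
func createRule(svc *waf.WAF, ipSetID string) (*waf.Rule, error) {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return nil, err
	}
	created, err := svc.CreateRule(&waf.CreateRuleInput{
		ChangeToken: token.ChangeToken,
		Name:        aws.String("block-bad-ips"), // placeholder
		MetricName:  aws.String("BlockBadIPs"),   // placeholder
	})
	if err != nil {
		return nil, err
	}
	token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return nil, err
	}
	_, err = svc.UpdateRule(&waf.UpdateRuleInput{
		ChangeToken: token.ChangeToken,
		RuleId:      created.Rule.RuleId,
		Updates: []*waf.RuleUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			Predicate: &waf.Predicate{
				DataId:  aws.String(ipSetID),
				Negated: aws.Bool(false),
				Type:    aws.String(waf.PredicateTypeIpMatch),
			},
		}},
	})
	return created.Rule, err
}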
// // See the AWS API reference guide for AWS WAF's -// API operation CreateSqlInjectionMatchSet for usage and error information. +// API operation CreateRule for usage and error information. // // Returned Error Codes: -// * ErrCodeDisallowedNameException "DisallowedNameException" -// The name specified is invalid. +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. // // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeInvalidAccountException "InvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. +// * ErrCodeDisallowedNameException "DisallowedNameException" +// The name specified is invalid. // // * ErrCodeInvalidParameterException "InvalidParameterException" // The operation failed because AWS WAF didn't recognize a parameter in the @@ -896,7 +980,7 @@ func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSe // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -904,120 +988,108 @@ func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSe // * Your request references an ARN that is malformed, or corresponds to // a resource with which a web ACL cannot be associated. // -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeLimitsExceededException "LimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of // WebACL objects that you can create for an AWS account. For more information, // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSet -func (c *WAF) CreateSqlInjectionMatchSet(input *CreateSqlInjectionMatchSetInput) (*CreateSqlInjectionMatchSetOutput, error) { - req, out := c.CreateSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRule +func (c *WAF) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) return out, req.Send() } -// CreateSqlInjectionMatchSetWithContext is the same as CreateSqlInjectionMatchSet with the addition of +// CreateRuleWithContext is the same as CreateRule with the addition of // the ability to pass a context and additional request options. // -// See CreateSqlInjectionMatchSet for details on how to use this API operation. +// See CreateRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *WAF) CreateSqlInjectionMatchSetWithContext(ctx aws.Context, input *CreateSqlInjectionMatchSetInput, opts ...request.Option) (*CreateSqlInjectionMatchSetOutput, error) { - req, out := c.CreateSqlInjectionMatchSetRequest(input) +func (c *WAF) CreateRuleWithContext(ctx aws.Context, input *CreateRuleInput, opts ...request.Option) (*CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateWebACL = "CreateWebACL" +const opCreateSizeConstraintSet = "CreateSizeConstraintSet" -// CreateWebACLRequest generates a "aws/request.Request" representing the -// client's request for the CreateWebACL operation. The "output" return +// CreateSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSizeConstraintSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateWebACL for more information on using the CreateWebACL +// See CreateSizeConstraintSet for more information on using the CreateSizeConstraintSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateWebACLRequest method. -// req, resp := client.CreateWebACLRequest(params) +// // Example sending a request using the CreateSizeConstraintSetRequest method. +// req, resp := client.CreateSizeConstraintSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACL -func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Request, output *CreateWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSet +func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput) (req *request.Request, output *CreateSizeConstraintSetOutput) { op := &request.Operation{ - Name: opCreateWebACL, + Name: opCreateSizeConstraintSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateWebACLInput{} + input = &CreateSizeConstraintSetInput{} } - output = &CreateWebACLOutput{} + output = &CreateSizeConstraintSetOutput{} req = c.newRequest(op, input, output) return } -// CreateWebACL API operation for AWS WAF. -// -// Creates a WebACL, which contains the Rules that identify the CloudFront web -// requests that you want to allow, block, or count. AWS WAF evaluates Rules -// in order based on the value of Priority for each Rule. -// -// You also specify a default action, either ALLOW or BLOCK. If a web request -// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request -// with the default action. -// -// To create and configure a WebACL, perform the following steps: +// CreateSizeConstraintSet API operation for AWS WAF. // -// Create and update the ByteMatchSet objects and other predicates that you -// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, -// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// Creates a SizeConstraintSet. 
You then use UpdateSizeConstraintSet to identify +// the part of a web request that you want AWS WAF to check for length, such +// as the length of the User-Agent header or the length of the query string. +// For example, you can create a SizeConstraintSet that matches any requests +// that have a query string that is longer than 100 bytes. You can then configure +// AWS WAF to reject those requests. // -// Create and update the Rules that you want to include in the WebACL. For more -// information, see CreateRule and UpdateRule. +// To create and configure a SizeConstraintSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateWebACL request. +// parameter of a CreateSizeConstraintSet request. // -// Submit a CreateWebACL request. +// Submit a CreateSizeConstraintSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateWebACL request. +// parameter of an UpdateSizeConstraintSet request. // -// Submit an UpdateWebACL request to specify the Rules that you want to include -// in the WebACL, to specify the default action, and to associate the WebACL -// with a CloudFront distribution. +// Submit an UpdateSizeConstraintSet request to specify the part of the request +// that you want AWS WAF to inspect (for example, the header or the URI) and +// the value that you want AWS WAF to watch for. // -// For more information about how to use the AWS WAF API, see the AWS WAF Developer -// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation CreateWebACL for usage and error information. +// API operation CreateSizeConstraintSet for usage and error information. // // Returned Error Codes: // * ErrCodeStaleDataException "StaleDataException" @@ -1056,7 +1128,7 @@ func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Reques // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -1070,89 +1142,88 @@ func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Reques // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. 
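
The SizeConstraintSet example from the comment above (reject requests whose query string exceeds 100 bytes) could be sketched as follows, under the same assumed imports; the set name is a placeholder.

// createSizeConstraintSet creates the set, then adds a constraint matching
// query strings longer than 100 bytes, as in the doc comment's example.
func createSizeConstraintSet(svc *waf.WAF) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	created, err := svc.CreateSizeConstraintSet(&waf.CreateSizeConstraintSetInput{
		ChangeToken: token.ChangeToken,
		Name:        aws.String("long-query-strings"), // placeholder
	})
	if err != nil {
		return err
	}
	token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSizeConstraintSet(&waf.UpdateSizeConstraintSetInput{
		ChangeToken:         token.ChangeToken,
		SizeConstraintSetId: created.SizeConstraintSet.SizeConstraintSetId,
		Updates: []*waf.SizeConstraintSetUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			SizeConstraint: &waf.SizeConstraint{
				ComparisonOperator: aws.String(waf.ComparisonOperatorGt),
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String(waf.MatchFieldTypeQueryString)},
				Size:               aws.Int64(100),
				TextTransformation: aws.String(waf.TextTransformationNone),
			},
		}},
	})
	return err
}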
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACL -func (c *WAF) CreateWebACL(input *CreateWebACLInput) (*CreateWebACLOutput, error) { - req, out := c.CreateWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSet +func (c *WAF) CreateSizeConstraintSet(input *CreateSizeConstraintSetInput) (*CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) return out, req.Send() } -// CreateWebACLWithContext is the same as CreateWebACL with the addition of +// CreateSizeConstraintSetWithContext is the same as CreateSizeConstraintSet with the addition of // the ability to pass a context and additional request options. // -// See CreateWebACL for details on how to use this API operation. +// See CreateSizeConstraintSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) CreateWebACLWithContext(ctx aws.Context, input *CreateWebACLInput, opts ...request.Option) (*CreateWebACLOutput, error) { - req, out := c.CreateWebACLRequest(input) +func (c *WAF) CreateSizeConstraintSetWithContext(ctx aws.Context, input *CreateSizeConstraintSetInput, opts ...request.Option) (*CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateXssMatchSet = "CreateXssMatchSet" +const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" -// CreateXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateXssMatchSet operation. The "output" return +// CreateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSqlInjectionMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateXssMatchSet for more information on using the CreateXssMatchSet +// See CreateSqlInjectionMatchSet for more information on using the CreateSqlInjectionMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateXssMatchSetRequest method. -// req, resp := client.CreateXssMatchSetRequest(params) +// // Example sending a request using the CreateSqlInjectionMatchSetRequest method. 
+// req, resp := client.CreateSqlInjectionMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSet -func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *request.Request, output *CreateXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSet +func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSetInput) (req *request.Request, output *CreateSqlInjectionMatchSetOutput) { op := &request.Operation{ - Name: opCreateXssMatchSet, + Name: opCreateSqlInjectionMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateXssMatchSetInput{} + input = &CreateSqlInjectionMatchSetInput{} } - output = &CreateXssMatchSetOutput{} + output = &CreateSqlInjectionMatchSetOutput{} req = c.newRequest(op, input, output) return } -// CreateXssMatchSet API operation for AWS WAF. +// CreateSqlInjectionMatchSet API operation for AWS WAF. // -// Creates an XssMatchSet, which you use to allow, block, or count requests -// that contain cross-site scripting attacks in the specified part of web requests. -// AWS WAF searches for character sequences that are likely to be malicious -// strings. +// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests +// that contain snippets of SQL code in a specified part of web requests. AWS +// WAF searches for character sequences that are likely to be malicious strings. // -// To create and configure an XssMatchSet, perform the following steps: +// To create and configure a SqlInjectionMatchSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateXssMatchSet request. +// parameter of a CreateSqlInjectionMatchSet request. // -// Submit a CreateXssMatchSet request. +// Submit a CreateSqlInjectionMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateXssMatchSet request. +// parameter of an UpdateSqlInjectionMatchSet request. // -// Submit an UpdateXssMatchSet request to specify the parts of web requests -// in which you want to allow, block, or count cross-site scripting attacks. +// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web +// requests in which you want to allow, block, or count malicious SQL code. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -1162,7 +1233,7 @@ func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *requ // the error. // // See the AWS API reference guide for AWS WAF's -// API operation CreateXssMatchSet for usage and error information. +// API operation CreateSqlInjectionMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeDisallowedNameException "DisallowedNameException" @@ -1197,7 +1268,7 @@ func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *requ // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. 
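
The SqlInjectionMatchSet flow has the same shape; a minimal sketch under the same assumed imports, with a placeholder name and URL_DECODE as one plausible text transformation for query strings.

// createSqlInjectionMatchSet creates the set, then tells AWS WAF to inspect
// the URL-decoded query string for SQL-injection-like sequences.
func createSqlInjectionMatchSet(svc *waf.WAF) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	created, err := svc.CreateSqlInjectionMatchSet(&waf.CreateSqlInjectionMatchSetInput{
		ChangeToken: token.ChangeToken,
		Name:        aws.String("sqli-query-string"), // placeholder
	})
	if err != nil {
		return err
	}
	token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSqlInjectionMatchSet(&waf.UpdateSqlInjectionMatchSetInput{
		ChangeToken:            token.ChangeToken,
		SqlInjectionMatchSetId: created.SqlInjectionMatchSet.SqlInjectionMatchSetId,
		Updates: []*waf.SqlInjectionMatchSetUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String(waf.MatchFieldTypeQueryString)},
				TextTransformation: aws.String(waf.TextTransformationUrlDecode),
			},
		}},
	})
	return err
}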
@@ -1215,96 +1286,116 @@ func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *requ // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSet -func (c *WAF) CreateXssMatchSet(input *CreateXssMatchSetInput) (*CreateXssMatchSetOutput, error) { - req, out := c.CreateXssMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSet +func (c *WAF) CreateSqlInjectionMatchSet(input *CreateSqlInjectionMatchSetInput) (*CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) return out, req.Send() } -// CreateXssMatchSetWithContext is the same as CreateXssMatchSet with the addition of +// CreateSqlInjectionMatchSetWithContext is the same as CreateSqlInjectionMatchSet with the addition of // the ability to pass a context and additional request options. // -// See CreateXssMatchSet for details on how to use this API operation. +// See CreateSqlInjectionMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) CreateXssMatchSetWithContext(ctx aws.Context, input *CreateXssMatchSetInput, opts ...request.Option) (*CreateXssMatchSetOutput, error) { - req, out := c.CreateXssMatchSetRequest(input) +func (c *WAF) CreateSqlInjectionMatchSetWithContext(ctx aws.Context, input *CreateSqlInjectionMatchSetInput, opts ...request.Option) (*CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteByteMatchSet = "DeleteByteMatchSet" +const opCreateWebACL = "CreateWebACL" -// DeleteByteMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteByteMatchSet operation. The "output" return +// CreateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebACL operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteByteMatchSet for more information on using the DeleteByteMatchSet +// See CreateWebACL for more information on using the CreateWebACL // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteByteMatchSetRequest method. -// req, resp := client.DeleteByteMatchSetRequest(params) +// // Example sending a request using the CreateWebACLRequest method. 
+// req, resp := client.CreateWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSet -func (c *WAF) DeleteByteMatchSetRequest(input *DeleteByteMatchSetInput) (req *request.Request, output *DeleteByteMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACL +func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Request, output *CreateWebACLOutput) { op := &request.Operation{ - Name: opDeleteByteMatchSet, + Name: opCreateWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteByteMatchSetInput{} + input = &CreateWebACLInput{} } - output = &DeleteByteMatchSetOutput{} + output = &CreateWebACLOutput{} req = c.newRequest(op, input, output) return } -// DeleteByteMatchSet API operation for AWS WAF. -// -// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's -// still used in any Rules or if it still includes any ByteMatchTuple objects -// (any filters). +// CreateWebACL API operation for AWS WAF. // -// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. +// Creates a WebACL, which contains the Rules that identify the CloudFront web +// requests that you want to allow, block, or count. AWS WAF evaluates Rules +// in order based on the value of Priority for each Rule. // -// To permanently delete a ByteMatchSet, perform the following steps: +// You also specify a default action, either ALLOW or BLOCK. If a web request +// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request +// with the default action. // -// Update the ByteMatchSet to remove filters, if any. For more information, -// see UpdateByteMatchSet. +// To create and configure a WebACL, perform the following steps: +// +// Create and update the ByteMatchSet objects and other predicates that you +// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, +// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteByteMatchSet request. +// parameter of a CreateWebACL request. // -// Submit a DeleteByteMatchSet request. +// Submit a CreateWebACL request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. +// +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. +// +// For more information about how to use the AWS WAF API, see the AWS WAF Developer +// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteByteMatchSet for usage and error information. +// API operation CreateWebACL for usage and error information. 
// // Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -1313,127 +1404,141 @@ func (c *WAF) DeleteByteMatchSetRequest(input *DeleteByteMatchSetInput) (req *re // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// * ErrCodeDisallowedNameException "DisallowedNameException" +// The name specified is invalid. // -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// * You specified an invalid parameter name. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// * You specified an invalid value. // -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. // -// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" -// The operation failed because you tried to delete an object that isn't empty. -// For example: +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a WebACL that still contains one or more Rule objects. +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. // -// * You tried to delete a Rule that still contains one or more ByteMatchSet -// objects or other predicates. +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple -// objects. +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // -// * You tried to delete an IPSet that references one or more IP addresses. +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSet -func (c *WAF) DeleteByteMatchSet(input *DeleteByteMatchSetInput) (*DeleteByteMatchSetOutput, error) { - req, out := c.DeleteByteMatchSetRequest(input) +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
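
A sketch of the CreateWebACL steps (a default action plus one activated Rule), under the same assumed imports; ruleID would come from a prior CreateRule call such as the earlier sketch, and the ACL name and metric name are placeholders.

// createWebACL defaults to ALLOW and attaches one blocking Rule at priority 1.
func createWebACL(svc *waf.WAF, ruleID string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	created, err := svc.CreateWebACL(&waf.CreateWebACLInput{
		ChangeToken:   token.ChangeToken,
		DefaultAction: &waf.WafAction{Type: aws.String(waf.WafActionTypeAllow)},
		MetricName:    aws.String("ExampleACL"),  // placeholder
		Name:          aws.String("example-acl"), // placeholder
	})
	if err != nil {
		return err
	}
	token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateWebACL(&waf.UpdateWebACLInput{
		ChangeToken: token.ChangeToken,
		WebACLId:    created.WebACL.WebACLId,
		Updates: []*waf.WebACLUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			ActivatedRule: &waf.ActivatedRule{
				Action:   &waf.WafAction{Type: aws.String(waf.WafActionTypeBlock)},
				Priority: aws.Int64(1),
				RuleId:   aws.String(ruleID),
			},
		}},
	})
	return err
}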
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACL +func (c *WAF) CreateWebACL(input *CreateWebACLInput) (*CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) return out, req.Send() } -// DeleteByteMatchSetWithContext is the same as DeleteByteMatchSet with the addition of +// CreateWebACLWithContext is the same as CreateWebACL with the addition of // the ability to pass a context and additional request options. // -// See DeleteByteMatchSet for details on how to use this API operation. +// See CreateWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteByteMatchSetWithContext(ctx aws.Context, input *DeleteByteMatchSetInput, opts ...request.Option) (*DeleteByteMatchSetOutput, error) { - req, out := c.DeleteByteMatchSetRequest(input) +func (c *WAF) CreateWebACLWithContext(ctx aws.Context, input *CreateWebACLInput, opts ...request.Option) (*CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteIPSet = "DeleteIPSet" +const opCreateXssMatchSet = "CreateXssMatchSet" -// DeleteIPSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteIPSet operation. The "output" return +// CreateXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateXssMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteIPSet for more information on using the DeleteIPSet +// See CreateXssMatchSet for more information on using the CreateXssMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteIPSetRequest method. -// req, resp := client.DeleteIPSetRequest(params) +// // Example sending a request using the CreateXssMatchSetRequest method. +// req, resp := client.CreateXssMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSet -func (c *WAF) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Request, output *DeleteIPSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSet +func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *request.Request, output *CreateXssMatchSetOutput) { op := &request.Operation{ - Name: opDeleteIPSet, + Name: opCreateXssMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteIPSetInput{} + input = &CreateXssMatchSetInput{} } - output = &DeleteIPSetOutput{} + output = &CreateXssMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteIPSet API operation for AWS WAF. +// CreateXssMatchSet API operation for AWS WAF. 
// -// Permanently deletes an IPSet. You can't delete an IPSet if it's still used -// in any Rules or if it still includes any IP addresses. +// Creates an XssMatchSet, which you use to allow, block, or count requests +// that contain cross-site scripting attacks in the specified part of web requests. +// AWS WAF searches for character sequences that are likely to be malicious +// strings. // -// If you just want to remove an IPSet from a Rule, use UpdateRule. +// To create and configure an XssMatchSet, perform the following steps: // -// To permanently delete an IPSet from AWS WAF, perform the following steps: +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateXssMatchSet request. // -// Update the IPSet to remove IP address ranges, if any. For more information, -// see UpdateIPSet. +// Submit a CreateXssMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteIPSet request. +// parameter of an UpdateXssMatchSet request. // -// Submit a DeleteIPSet request. +// Submit an UpdateXssMatchSet request to specify the parts of web requests +// in which you want to allow, block, or count cross-site scripting attacks. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteIPSet for usage and error information. +// API operation CreateXssMatchSet for usage and error information. // // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. +// * ErrCodeDisallowedNameException "DisallowedNameException" +// The name specified is invalid. // // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request @@ -1443,126 +1548,135 @@ func (c *WAF) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Request, // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: // -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// * You specified an invalid parameter name. // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// * You specified an invalid value. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. // -// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" -// The operation failed because you tried to delete an object that isn't empty. 
-// For example: +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a WebACL that still contains one or more Rule objects. +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. // -// * You tried to delete a Rule that still contains one or more ByteMatchSet -// objects or other predicates. +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple -// objects. +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // -// * You tried to delete an IPSet that references one or more IP addresses. +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSet -func (c *WAF) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { - req, out := c.DeleteIPSetRequest(input) +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSet +func (c *WAF) CreateXssMatchSet(input *CreateXssMatchSetInput) (*CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) return out, req.Send() } -// DeleteIPSetWithContext is the same as DeleteIPSet with the addition of +// CreateXssMatchSetWithContext is the same as CreateXssMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteIPSet for details on how to use this API operation. +// See CreateXssMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteIPSetWithContext(ctx aws.Context, input *DeleteIPSetInput, opts ...request.Option) (*DeleteIPSetOutput, error) { - req, out := c.DeleteIPSetRequest(input) +func (c *WAF) CreateXssMatchSetWithContext(ctx aws.Context, input *CreateXssMatchSetInput, opts ...request.Option) (*CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteRateBasedRule = "DeleteRateBasedRule" +const opDeleteByteMatchSet = "DeleteByteMatchSet" -// DeleteRateBasedRuleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteRateBasedRule operation. The "output" return +// DeleteByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteByteMatchSet operation. 
The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteRateBasedRule for more information on using the DeleteRateBasedRule +// See DeleteByteMatchSet for more information on using the DeleteByteMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteRateBasedRuleRequest method. -// req, resp := client.DeleteRateBasedRuleRequest(params) +// // Example sending a request using the DeleteByteMatchSetRequest method. +// req, resp := client.DeleteByteMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRule -func (c *WAF) DeleteRateBasedRuleRequest(input *DeleteRateBasedRuleInput) (req *request.Request, output *DeleteRateBasedRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSet +func (c *WAF) DeleteByteMatchSetRequest(input *DeleteByteMatchSetInput) (req *request.Request, output *DeleteByteMatchSetOutput) { op := &request.Operation{ - Name: opDeleteRateBasedRule, + Name: opDeleteByteMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteRateBasedRuleInput{} + input = &DeleteByteMatchSetInput{} } - output = &DeleteRateBasedRuleOutput{} + output = &DeleteByteMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteRateBasedRule API operation for AWS WAF. +// DeleteByteMatchSet API operation for AWS WAF. // -// Permanently deletes a RateBasedRule. You can't delete a rule if it's still -// used in any WebACL objects or if it still includes any predicates, such as -// ByteMatchSet objects. +// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's +// still used in any Rules or if it still includes any ByteMatchTuple objects +// (any filters). // -// If you just want to remove a rule from a WebACL, use UpdateWebACL. +// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. // -// To permanently delete a RateBasedRule from AWS WAF, perform the following -// steps: +// To permanently delete a ByteMatchSet, perform the following steps: // -// Update the RateBasedRule to remove predicates, if any. For more information, -// see UpdateRateBasedRule. +// Update the ByteMatchSet to remove filters, if any. For more information, +// see UpdateByteMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteRateBasedRule request. +// parameter of a DeleteByteMatchSet request. // -// Submit a DeleteRateBasedRule request. +// Submit a DeleteByteMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteRateBasedRule for usage and error information. +// API operation DeleteByteMatchSet for usage and error information. 
// // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -1582,7 +1696,11 @@ func (c *WAF) DeleteRateBasedRuleRequest(input *DeleteRateBasedRuleInput) (req * // // * You tried to delete a Rule that is still referenced by a WebACL. // -// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" // The operation failed because you tried to delete an object that isn't empty. // For example: // @@ -1596,93 +1714,93 @@ func (c *WAF) DeleteRateBasedRuleRequest(input *DeleteRateBasedRuleInput) (req * // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRule -func (c *WAF) DeleteRateBasedRule(input *DeleteRateBasedRuleInput) (*DeleteRateBasedRuleOutput, error) { - req, out := c.DeleteRateBasedRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSet +func (c *WAF) DeleteByteMatchSet(input *DeleteByteMatchSetInput) (*DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) return out, req.Send() } -// DeleteRateBasedRuleWithContext is the same as DeleteRateBasedRule with the addition of +// DeleteByteMatchSetWithContext is the same as DeleteByteMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteRateBasedRule for details on how to use this API operation. +// See DeleteByteMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteRateBasedRuleWithContext(ctx aws.Context, input *DeleteRateBasedRuleInput, opts ...request.Option) (*DeleteRateBasedRuleOutput, error) { - req, out := c.DeleteRateBasedRuleRequest(input) +func (c *WAF) DeleteByteMatchSetWithContext(ctx aws.Context, input *DeleteByteMatchSetInput, opts ...request.Option) (*DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteRule = "DeleteRule" +const opDeleteGeoMatchSet = "DeleteGeoMatchSet" -// DeleteRuleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteRule operation. The "output" return +// DeleteGeoMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGeoMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DeleteRule for more information on using the DeleteRule +// See DeleteGeoMatchSet for more information on using the DeleteGeoMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteRuleRequest method. -// req, resp := client.DeleteRuleRequest(params) +// // Example sending a request using the DeleteGeoMatchSetRequest method. +// req, resp := client.DeleteGeoMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRule -func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteGeoMatchSet +func (c *WAF) DeleteGeoMatchSetRequest(input *DeleteGeoMatchSetInput) (req *request.Request, output *DeleteGeoMatchSetOutput) { op := &request.Operation{ - Name: opDeleteRule, + Name: opDeleteGeoMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteRuleInput{} + input = &DeleteGeoMatchSetInput{} } - output = &DeleteRuleOutput{} + output = &DeleteGeoMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteRule API operation for AWS WAF. +// DeleteGeoMatchSet API operation for AWS WAF. // -// Permanently deletes a Rule. You can't delete a Rule if it's still used in -// any WebACL objects or if it still includes any predicates, such as ByteMatchSet -// objects. +// Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's +// still used in any Rules or if it still includes any countries. // -// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// If you just want to remove a GeoMatchSet from a Rule, use UpdateRule. // -// To permanently delete a Rule from AWS WAF, perform the following steps: +// To permanently delete a GeoMatchSet from AWS WAF, perform the following steps: // -// Update the Rule to remove predicates, if any. For more information, see UpdateRule. +// Update the GeoMatchSet to remove any countries. For more information, see +// UpdateGeoMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteRule request. +// parameter of a DeleteGeoMatchSet request. // -// Submit a DeleteRule request. +// Submit a DeleteGeoMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteRule for usage and error information. +// API operation DeleteGeoMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeStaleDataException "StaleDataException" @@ -1722,94 +1840,93 @@ func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, o // // * You tried to delete an IPSet that references one or more IP addresses. 
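
Once the set has been emptied with UpdateGeoMatchSet, DeleteGeoMatchSet reduces to the same two calls; a minimal sketch under the same assumed imports:

// deleteGeoMatchSet assumes the set's countries were already removed, as the
// steps above require.
func deleteGeoMatchSet(svc *waf.WAF, id string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.DeleteGeoMatchSet(&waf.DeleteGeoMatchSetInput{
		ChangeToken:   token.ChangeToken,
		GeoMatchSetId: aws.String(id),
	})
	return err
}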
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRule -func (c *WAF) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { - req, out := c.DeleteRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteGeoMatchSet +func (c *WAF) DeleteGeoMatchSet(input *DeleteGeoMatchSetInput) (*DeleteGeoMatchSetOutput, error) { + req, out := c.DeleteGeoMatchSetRequest(input) return out, req.Send() } -// DeleteRuleWithContext is the same as DeleteRule with the addition of +// DeleteGeoMatchSetWithContext is the same as DeleteGeoMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteRule for details on how to use this API operation. +// See DeleteGeoMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteRuleWithContext(ctx aws.Context, input *DeleteRuleInput, opts ...request.Option) (*DeleteRuleOutput, error) { - req, out := c.DeleteRuleRequest(input) +func (c *WAF) DeleteGeoMatchSetWithContext(ctx aws.Context, input *DeleteGeoMatchSetInput, opts ...request.Option) (*DeleteGeoMatchSetOutput, error) { + req, out := c.DeleteGeoMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" +const opDeleteIPSet = "DeleteIPSet" -// DeleteSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSizeConstraintSet operation. The "output" return +// DeleteIPSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIPSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSizeConstraintSet for more information on using the DeleteSizeConstraintSet +// See DeleteIPSet for more information on using the DeleteIPSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSizeConstraintSetRequest method. -// req, resp := client.DeleteSizeConstraintSetRequest(params) +// // Example sending a request using the DeleteIPSetRequest method. 
+// req, resp := client.DeleteIPSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSet -func (c *WAF) DeleteSizeConstraintSetRequest(input *DeleteSizeConstraintSetInput) (req *request.Request, output *DeleteSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSet +func (c *WAF) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Request, output *DeleteIPSetOutput) { op := &request.Operation{ - Name: opDeleteSizeConstraintSet, + Name: opDeleteIPSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteSizeConstraintSetInput{} + input = &DeleteIPSetInput{} } - output = &DeleteSizeConstraintSetOutput{} + output = &DeleteIPSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteSizeConstraintSet API operation for AWS WAF. +// DeleteIPSet API operation for AWS WAF. // -// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet -// if it's still used in any Rules or if it still includes any SizeConstraint -// objects (any filters). +// Permanently deletes an IPSet. You can't delete an IPSet if it's still used +// in any Rules or if it still includes any IP addresses. // -// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. +// If you just want to remove an IPSet from a Rule, use UpdateRule. // -// To permanently delete a SizeConstraintSet, perform the following steps: +// To permanently delete an IPSet from AWS WAF, perform the following steps: // -// Update the SizeConstraintSet to remove filters, if any. For more information, -// see UpdateSizeConstraintSet. +// Update the IPSet to remove IP address ranges, if any. For more information, +// see UpdateIPSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteSizeConstraintSet request. +// parameter of a DeleteIPSet request. // -// Submit a DeleteSizeConstraintSet request. +// Submit a DeleteIPSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteSizeConstraintSet for usage and error information. +// API operation DeleteIPSet for usage and error information. // // Returned Error Codes: // * ErrCodeStaleDataException "StaleDataException" @@ -1849,97 +1966,101 @@ func (c *WAF) DeleteSizeConstraintSetRequest(input *DeleteSizeConstraintSetInput // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSet -func (c *WAF) DeleteSizeConstraintSet(input *DeleteSizeConstraintSetInput) (*DeleteSizeConstraintSetOutput, error) { - req, out := c.DeleteSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSet +func (c *WAF) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) return out, req.Send() } -// DeleteSizeConstraintSetWithContext is the same as DeleteSizeConstraintSet with the addition of +// DeleteIPSetWithContext is the same as DeleteIPSet with the addition of // the ability to pass a context and additional request options. 
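
A sketch of DeleteIPSet that also illustrates the awserr handling these comments keep referencing (runtime type assertion plus Code()); it assumes the earlier imports plus github.com/aws/aws-sdk-go/aws/awserr.

// deleteIPSet attempts the delete and recognizes the documented
// NonEmptyEntityException when the set still references addresses.
func deleteIPSet(svc *waf.WAF, id string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.DeleteIPSet(&waf.DeleteIPSetInput{
		ChangeToken: token.ChangeToken,
		IPSetId:     aws.String(id),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == waf.ErrCodeNonEmptyEntityException {
		// The IPSet still contains addresses: empty it with UpdateIPSet
		// (DELETE each IPSetDescriptor), then retry with a fresh token.
	}
	return err
}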
// -// See DeleteSizeConstraintSet for details on how to use this API operation. +// See DeleteIPSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteSizeConstraintSetWithContext(ctx aws.Context, input *DeleteSizeConstraintSetInput, opts ...request.Option) (*DeleteSizeConstraintSetOutput, error) { - req, out := c.DeleteSizeConstraintSetRequest(input) +func (c *WAF) DeleteIPSetWithContext(ctx aws.Context, input *DeleteIPSetInput, opts ...request.Option) (*DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" +const opDeleteRateBasedRule = "DeleteRateBasedRule" -// DeleteSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSqlInjectionMatchSet operation. The "output" return +// DeleteRateBasedRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRateBasedRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSqlInjectionMatchSet for more information on using the DeleteSqlInjectionMatchSet +// See DeleteRateBasedRule for more information on using the DeleteRateBasedRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSqlInjectionMatchSetRequest method. -// req, resp := client.DeleteSqlInjectionMatchSetRequest(params) +// // Example sending a request using the DeleteRateBasedRuleRequest method. +// req, resp := client.DeleteRateBasedRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSet -func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSetInput) (req *request.Request, output *DeleteSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRule +func (c *WAF) DeleteRateBasedRuleRequest(input *DeleteRateBasedRuleInput) (req *request.Request, output *DeleteRateBasedRuleOutput) { op := &request.Operation{ - Name: opDeleteSqlInjectionMatchSet, + Name: opDeleteRateBasedRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteSqlInjectionMatchSetInput{} + input = &DeleteRateBasedRuleInput{} } - output = &DeleteSqlInjectionMatchSetOutput{} + output = &DeleteRateBasedRuleOutput{} req = c.newRequest(op, input, output) return } -// DeleteSqlInjectionMatchSet API operation for AWS WAF. +// DeleteRateBasedRule API operation for AWS WAF. // -// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet -// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple -// objects. 
+// Permanently deletes a RateBasedRule. You can't delete a rule if it's still +// used in any WebACL objects or if it still includes any predicates, such as +// ByteMatchSet objects. // -// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. +// If you just want to remove a rule from a WebACL, use UpdateWebACL. // -// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// To permanently delete a RateBasedRule from AWS WAF, perform the following // steps: // -// Update the SqlInjectionMatchSet to remove filters, if any. For more information, -// see UpdateSqlInjectionMatchSet. +// Update the RateBasedRule to remove predicates, if any. For more information, +// see UpdateRateBasedRule. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteSqlInjectionMatchSet request. +// parameter of a DeleteRateBasedRule request. // -// Submit a DeleteSqlInjectionMatchSet request. +// Submit a DeleteRateBasedRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteSqlInjectionMatchSet for usage and error information. +// API operation DeleteRateBasedRule for usage and error information. // // Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -1959,10 +2080,6 @@ func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSe // // * You tried to delete a Rule that is still referenced by a WebACL. // -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeNonEmptyEntityException "NonEmptyEntityException" // The operation failed because you tried to delete an object that isn't empty. // For example: @@ -1977,96 +2094,96 @@ func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSe // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSet -func (c *WAF) DeleteSqlInjectionMatchSet(input *DeleteSqlInjectionMatchSetInput) (*DeleteSqlInjectionMatchSetOutput, error) { - req, out := c.DeleteSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRule +func (c *WAF) DeleteRateBasedRule(input *DeleteRateBasedRuleInput) (*DeleteRateBasedRuleOutput, error) { + req, out := c.DeleteRateBasedRuleRequest(input) return out, req.Send() } -// DeleteSqlInjectionMatchSetWithContext is the same as DeleteSqlInjectionMatchSet with the addition of +// DeleteRateBasedRuleWithContext is the same as DeleteRateBasedRule with the addition of // the ability to pass a context and additional request options. // -// See DeleteSqlInjectionMatchSet for details on how to use this API operation. +// See DeleteRateBasedRule for details on how to use this API operation. 
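//
// A minimal sketch of the WithContext variant (names assumed: a configured
// client `svc`, a fresh change token `token`, and imports of context/time);
// the deadline cancels the in-flight HTTP request if it expires.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    _, err := svc.DeleteRateBasedRuleWithContext(ctx, &waf.DeleteRateBasedRuleInput{
//        ChangeToken: token.ChangeToken,
//        RuleId:      aws.String("example-rule-id"), // placeholder ID
//    })
//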
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteSqlInjectionMatchSetWithContext(ctx aws.Context, input *DeleteSqlInjectionMatchSetInput, opts ...request.Option) (*DeleteSqlInjectionMatchSetOutput, error) { - req, out := c.DeleteSqlInjectionMatchSetRequest(input) +func (c *WAF) DeleteRateBasedRuleWithContext(ctx aws.Context, input *DeleteRateBasedRuleInput, opts ...request.Option) (*DeleteRateBasedRuleOutput, error) { + req, out := c.DeleteRateBasedRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteWebACL = "DeleteWebACL" +const opDeleteRegexMatchSet = "DeleteRegexMatchSet" -// DeleteWebACLRequest generates a "aws/request.Request" representing the -// client's request for the DeleteWebACL operation. The "output" return +// DeleteRegexMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRegexMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteWebACL for more information on using the DeleteWebACL +// See DeleteRegexMatchSet for more information on using the DeleteRegexMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteWebACLRequest method. -// req, resp := client.DeleteWebACLRequest(params) +// // Example sending a request using the DeleteRegexMatchSetRequest method. +// req, resp := client.DeleteRegexMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACL -func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Request, output *DeleteWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexMatchSet +func (c *WAF) DeleteRegexMatchSetRequest(input *DeleteRegexMatchSetInput) (req *request.Request, output *DeleteRegexMatchSetOutput) { op := &request.Operation{ - Name: opDeleteWebACL, + Name: opDeleteRegexMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteWebACLInput{} + input = &DeleteRegexMatchSetInput{} } - output = &DeleteWebACLOutput{} + output = &DeleteRegexMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteWebACL API operation for AWS WAF. +// DeleteRegexMatchSet API operation for AWS WAF. // -// Permanently deletes a WebACL. You can't delete a WebACL if it still contains -// any Rules. +// Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if +// it's still used in any Rules or if it still includes any RegexMatchTuples +// objects (any filters). // -// To delete a WebACL, perform the following steps: +// If you just want to remove a RegexMatchSet from a Rule, use UpdateRule. // -// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. 
+// To permanently delete a RegexMatchSet, perform the following steps: +// +// Update the RegexMatchSet to remove filters, if any. For more information, +// see UpdateRegexMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteWebACL request. +// parameter of a DeleteRegexMatchSet request. // -// Submit a DeleteWebACL request. +// Submit a DeleteRegexMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteWebACL for usage and error information. +// API operation DeleteRegexMatchSet for usage and error information. // // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2086,6 +2203,10 @@ func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Reques // // * You tried to delete a Rule that is still referenced by a WebACL. // +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeNonEmptyEntityException "NonEmptyEntityException" // The operation failed because you tried to delete an object that isn't empty. // For example: @@ -2100,94 +2221,82 @@ func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Reques // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACL -func (c *WAF) DeleteWebACL(input *DeleteWebACLInput) (*DeleteWebACLOutput, error) { - req, out := c.DeleteWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexMatchSet +func (c *WAF) DeleteRegexMatchSet(input *DeleteRegexMatchSetInput) (*DeleteRegexMatchSetOutput, error) { + req, out := c.DeleteRegexMatchSetRequest(input) return out, req.Send() } -// DeleteWebACLWithContext is the same as DeleteWebACL with the addition of +// DeleteRegexMatchSetWithContext is the same as DeleteRegexMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteWebACL for details on how to use this API operation. +// See DeleteRegexMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteWebACLWithContext(ctx aws.Context, input *DeleteWebACLInput, opts ...request.Option) (*DeleteWebACLOutput, error) { - req, out := c.DeleteWebACLRequest(input) +func (c *WAF) DeleteRegexMatchSetWithContext(ctx aws.Context, input *DeleteRegexMatchSetInput, opts ...request.Option) (*DeleteRegexMatchSetOutput, error) { + req, out := c.DeleteRegexMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
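	// SetContext attaches the caller's context so the request below can be
	// cancelled; ApplyOptions runs any per-request options (custom handlers,
	// headers, retry tweaks) before Send executes the HTTP call.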
return out, req.Send() } -const opDeleteXssMatchSet = "DeleteXssMatchSet" +const opDeleteRegexPatternSet = "DeleteRegexPatternSet" -// DeleteXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteXssMatchSet operation. The "output" return +// DeleteRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRegexPatternSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteXssMatchSet for more information on using the DeleteXssMatchSet +// See DeleteRegexPatternSet for more information on using the DeleteRegexPatternSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteXssMatchSetRequest method. -// req, resp := client.DeleteXssMatchSetRequest(params) +// // Example sending a request using the DeleteRegexPatternSetRequest method. +// req, resp := client.DeleteRegexPatternSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSet -func (c *WAF) DeleteXssMatchSetRequest(input *DeleteXssMatchSetInput) (req *request.Request, output *DeleteXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexPatternSet +func (c *WAF) DeleteRegexPatternSetRequest(input *DeleteRegexPatternSetInput) (req *request.Request, output *DeleteRegexPatternSetOutput) { op := &request.Operation{ - Name: opDeleteXssMatchSet, + Name: opDeleteRegexPatternSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteXssMatchSetInput{} + input = &DeleteRegexPatternSetInput{} } - output = &DeleteXssMatchSetOutput{} + output = &DeleteRegexPatternSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteXssMatchSet API operation for AWS WAF. -// -// Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's -// still used in any Rules or if it still contains any XssMatchTuple objects. -// -// If you just want to remove an XssMatchSet from a Rule, use UpdateRule. -// -// To permanently delete an XssMatchSet from AWS WAF, perform the following -// steps: -// -// Update the XssMatchSet to remove filters, if any. For more information, see -// UpdateXssMatchSet. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteXssMatchSet request. +// DeleteRegexPatternSet API operation for AWS WAF. // -// Submit a DeleteXssMatchSet request. +// Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet +// if it's still used in any RegexMatchSet or if the RegexPatternSet is not +// empty. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation DeleteXssMatchSet for usage and error information. +// API operation DeleteRegexPatternSet for usage and error information. 
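//
// A minimal sketch (not part of the generated code), assuming a configured
// client `svc` and that the RegexPatternSet is already empty and unreferenced:
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    _, err = svc.DeleteRegexPatternSet(&waf.DeleteRegexPatternSetInput{
//        ChangeToken:       token.ChangeToken,
//        RegexPatternSetId: aws.String("example-pattern-set-id"), // placeholder ID
//    })
//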
// // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -2227,82 +2336,99 @@ func (c *WAF) DeleteXssMatchSetRequest(input *DeleteXssMatchSetInput) (req *requ // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSet -func (c *WAF) DeleteXssMatchSet(input *DeleteXssMatchSetInput) (*DeleteXssMatchSetOutput, error) { - req, out := c.DeleteXssMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexPatternSet +func (c *WAF) DeleteRegexPatternSet(input *DeleteRegexPatternSetInput) (*DeleteRegexPatternSetOutput, error) { + req, out := c.DeleteRegexPatternSetRequest(input) return out, req.Send() } -// DeleteXssMatchSetWithContext is the same as DeleteXssMatchSet with the addition of +// DeleteRegexPatternSetWithContext is the same as DeleteRegexPatternSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteXssMatchSet for details on how to use this API operation. +// See DeleteRegexPatternSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) DeleteXssMatchSetWithContext(ctx aws.Context, input *DeleteXssMatchSetInput, opts ...request.Option) (*DeleteXssMatchSetOutput, error) { - req, out := c.DeleteXssMatchSetRequest(input) +func (c *WAF) DeleteRegexPatternSetWithContext(ctx aws.Context, input *DeleteRegexPatternSetInput, opts ...request.Option) (*DeleteRegexPatternSetOutput, error) { + req, out := c.DeleteRegexPatternSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetByteMatchSet = "GetByteMatchSet" +const opDeleteRule = "DeleteRule" -// GetByteMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the GetByteMatchSet operation. The "output" return +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetByteMatchSet for more information on using the GetByteMatchSet +// See DeleteRule for more information on using the DeleteRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetByteMatchSetRequest method. -// req, resp := client.GetByteMatchSetRequest(params) +// // Example sending a request using the DeleteRuleRequest method. 
+// req, resp := client.DeleteRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSet -func (c *WAF) GetByteMatchSetRequest(input *GetByteMatchSetInput) (req *request.Request, output *GetByteMatchSetOutput) { - op := &request.Operation{ - Name: opGetByteMatchSet, +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRule +func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetByteMatchSetInput{} + input = &DeleteRuleInput{} } - output = &GetByteMatchSetOutput{} + output = &DeleteRuleOutput{} req = c.newRequest(op, input, output) return } -// GetByteMatchSet API operation for AWS WAF. +// DeleteRule API operation for AWS WAF. // -// Returns the ByteMatchSet specified by ByteMatchSetId. +// Permanently deletes a Rule. You can't delete a Rule if it's still used in +// any WebACL objects or if it still includes any predicates, such as ByteMatchSet +// objects. +// +// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// +// To permanently delete a Rule from AWS WAF, perform the following steps: +// +// Update the Rule to remove predicates, if any. For more information, see UpdateRule. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteRule request. +// +// Submit a DeleteRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetByteMatchSet for usage and error information. +// API operation DeleteRule for usage and error information. // // Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2314,269 +2440,373 @@ func (c *WAF) GetByteMatchSetRequest(input *GetByteMatchSetInput) (req *request. // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSet -func (c *WAF) GetByteMatchSet(input *GetByteMatchSetInput) (*GetByteMatchSetOutput, error) { - req, out := c.GetByteMatchSetRequest(input) +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. 
+// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRule +func (c *WAF) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) return out, req.Send() } -// GetByteMatchSetWithContext is the same as GetByteMatchSet with the addition of +// DeleteRuleWithContext is the same as DeleteRule with the addition of // the ability to pass a context and additional request options. // -// See GetByteMatchSet for details on how to use this API operation. +// See DeleteRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetByteMatchSetWithContext(ctx aws.Context, input *GetByteMatchSetInput, opts ...request.Option) (*GetByteMatchSetOutput, error) { - req, out := c.GetByteMatchSetRequest(input) +func (c *WAF) DeleteRuleWithContext(ctx aws.Context, input *DeleteRuleInput, opts ...request.Option) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetChangeToken = "GetChangeToken" +const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" -// GetChangeTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetChangeToken operation. The "output" return +// DeleteSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSizeConstraintSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetChangeToken for more information on using the GetChangeToken +// See DeleteSizeConstraintSet for more information on using the DeleteSizeConstraintSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetChangeTokenRequest method. -// req, resp := client.GetChangeTokenRequest(params) +// // Example sending a request using the DeleteSizeConstraintSetRequest method. 
+// req, resp := client.DeleteSizeConstraintSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeToken -func (c *WAF) GetChangeTokenRequest(input *GetChangeTokenInput) (req *request.Request, output *GetChangeTokenOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSet +func (c *WAF) DeleteSizeConstraintSetRequest(input *DeleteSizeConstraintSetInput) (req *request.Request, output *DeleteSizeConstraintSetOutput) { op := &request.Operation{ - Name: opGetChangeToken, + Name: opDeleteSizeConstraintSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetChangeTokenInput{} + input = &DeleteSizeConstraintSetInput{} } - output = &GetChangeTokenOutput{} + output = &DeleteSizeConstraintSetOutput{} req = c.newRequest(op, input, output) return } -// GetChangeToken API operation for AWS WAF. +// DeleteSizeConstraintSet API operation for AWS WAF. // -// When you want to create, update, or delete AWS WAF objects, get a change -// token and include the change token in the create, update, or delete request. -// Change tokens ensure that your application doesn't submit conflicting requests -// to AWS WAF. +// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet +// if it's still used in any Rules or if it still includes any SizeConstraint +// objects (any filters). // -// Each create, update, or delete request must use a unique change token. If -// your application submits a GetChangeToken request and then submits a second -// GetChangeToken request before submitting a create, update, or delete request, -// the second GetChangeToken request returns the same value as the first GetChangeToken -// request. +// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. // -// When you use a change token in a create, update, or delete request, the status -// of the change token changes to PENDING, which indicates that AWS WAF is propagating -// the change to all AWS WAF servers. Use GetChangeTokenStatus to determine -// the status of your change token. +// To permanently delete a SizeConstraintSet, perform the following steps: +// +// Update the SizeConstraintSet to remove filters, if any. For more information, +// see UpdateSizeConstraintSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSizeConstraintSet request. +// +// Submit a DeleteSizeConstraintSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetChangeToken for usage and error information. +// API operation DeleteSizeConstraintSet for usage and error information. // // Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. 
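//
// These codes can be distinguished with the runtime type assertion the docs
// above suggest; a sketch, assuming `err` came from a DeleteSizeConstraintSet
// call:
//
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case waf.ErrCodeStaleDataException:
//            // the change token was already used; fetch a fresh one and retry
//        case waf.ErrCodeNonEmptyEntityException:
//            // remove the remaining SizeConstraint objects first
//        default:
//            log.Println(aerr.Code(), aerr.Message())
//        }
//    }
//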
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeToken -func (c *WAF) GetChangeToken(input *GetChangeTokenInput) (*GetChangeTokenOutput, error) { - req, out := c.GetChangeTokenRequest(input) +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSet +func (c *WAF) DeleteSizeConstraintSet(input *DeleteSizeConstraintSetInput) (*DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) return out, req.Send() } -// GetChangeTokenWithContext is the same as GetChangeToken with the addition of +// DeleteSizeConstraintSetWithContext is the same as DeleteSizeConstraintSet with the addition of // the ability to pass a context and additional request options. // -// See GetChangeToken for details on how to use this API operation. +// See DeleteSizeConstraintSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetChangeTokenWithContext(ctx aws.Context, input *GetChangeTokenInput, opts ...request.Option) (*GetChangeTokenOutput, error) { - req, out := c.GetChangeTokenRequest(input) +func (c *WAF) DeleteSizeConstraintSetWithContext(ctx aws.Context, input *DeleteSizeConstraintSetInput, opts ...request.Option) (*DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetChangeTokenStatus = "GetChangeTokenStatus" +const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" -// GetChangeTokenStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetChangeTokenStatus operation. The "output" return +// DeleteSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSqlInjectionMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See GetChangeTokenStatus for more information on using the GetChangeTokenStatus +// See DeleteSqlInjectionMatchSet for more information on using the DeleteSqlInjectionMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetChangeTokenStatusRequest method. -// req, resp := client.GetChangeTokenStatusRequest(params) +// // Example sending a request using the DeleteSqlInjectionMatchSetRequest method. +// req, resp := client.DeleteSqlInjectionMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatus -func (c *WAF) GetChangeTokenStatusRequest(input *GetChangeTokenStatusInput) (req *request.Request, output *GetChangeTokenStatusOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSet +func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSetInput) (req *request.Request, output *DeleteSqlInjectionMatchSetOutput) { op := &request.Operation{ - Name: opGetChangeTokenStatus, + Name: opDeleteSqlInjectionMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetChangeTokenStatusInput{} + input = &DeleteSqlInjectionMatchSetInput{} } - output = &GetChangeTokenStatusOutput{} + output = &DeleteSqlInjectionMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetChangeTokenStatus API operation for AWS WAF. +// DeleteSqlInjectionMatchSet API operation for AWS WAF. // -// Returns the status of a ChangeToken that you got by calling GetChangeToken. -// ChangeTokenStatus is one of the following values: +// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet +// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple +// objects. // -// * PROVISIONED: You requested the change token by calling GetChangeToken, -// but you haven't used it yet in a call to create, update, or delete an -// AWS WAF object. +// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. // -// * PENDING: AWS WAF is propagating the create, update, or delete request -// to all AWS WAF servers. +// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// steps: // -// * IN_SYNC: Propagation is complete. +// Update the SqlInjectionMatchSet to remove filters, if any. For more information, +// see UpdateSqlInjectionMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSqlInjectionMatchSet request. +// +// Submit a DeleteSqlInjectionMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetChangeTokenStatus for usage and error information. +// API operation DeleteSqlInjectionMatchSet for usage and error information. // // Returned Error Codes: -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. 
-// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatus -func (c *WAF) GetChangeTokenStatus(input *GetChangeTokenStatusInput) (*GetChangeTokenStatusOutput, error) { - req, out := c.GetChangeTokenStatusRequest(input) - return out, req.Send() -} - -// GetChangeTokenStatusWithContext is the same as GetChangeTokenStatus with the addition of -// the ability to pass a context and additional request options. +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// See GetChangeTokenStatus for details on how to use this API operation. +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WAF) GetChangeTokenStatusWithContext(ctx aws.Context, input *GetChangeTokenStatusInput, opts ...request.Option) (*GetChangeTokenStatusOutput, error) { - req, out := c.GetChangeTokenStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetIPSet = "GetIPSet" - -// GetIPSetRequest generates a "aws/request.Request" representing the -// client's request for the GetIPSet operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: // -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. // -// See GetIPSet for more information on using the GetIPSet -// API call, and error handling. +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. 
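//
// A minimal sketch of the two-step delete described above (assuming a
// configured client `svc` and an already-emptied match set):
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    _, err = svc.DeleteSqlInjectionMatchSet(&waf.DeleteSqlInjectionMatchSetInput{
//        ChangeToken:            token.ChangeToken,
//        SqlInjectionMatchSetId: aws.String("example-set-id"), // placeholder ID
//    })
//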
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSet +func (c *WAF) DeleteSqlInjectionMatchSet(input *DeleteSqlInjectionMatchSetInput) (*DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) + return out, req.Send() +} + +// DeleteSqlInjectionMatchSetWithContext is the same as DeleteSqlInjectionMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSqlInjectionMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) DeleteSqlInjectionMatchSetWithContext(ctx aws.Context, input *DeleteSqlInjectionMatchSetInput, opts ...request.Option) (*DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteWebACL = "DeleteWebACL" + +// DeleteWebACLRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWebACL operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteWebACL for more information on using the DeleteWebACL +// API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetIPSetRequest method. -// req, resp := client.GetIPSetRequest(params) +// // Example sending a request using the DeleteWebACLRequest method. +// req, resp := client.DeleteWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSet -func (c *WAF) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, output *GetIPSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACL +func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Request, output *DeleteWebACLOutput) { op := &request.Operation{ - Name: opGetIPSet, + Name: opDeleteWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetIPSetInput{} + input = &DeleteWebACLInput{} } - output = &GetIPSetOutput{} + output = &DeleteWebACLOutput{} req = c.newRequest(op, input, output) return } -// GetIPSet API operation for AWS WAF. +// DeleteWebACL API operation for AWS WAF. // -// Returns the IPSet that is specified by IPSetId. +// Permanently deletes a WebACL. You can't delete a WebACL if it still contains +// any Rules. +// +// To delete a WebACL, perform the following steps: +// +// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. +// +// Submit a DeleteWebACL request. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetIPSet for usage and error information. +// API operation DeleteWebACL for usage and error information. // // Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2588,81 +2818,116 @@ func (c *WAF) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, outpu // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSet -func (c *WAF) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { - req, out := c.GetIPSetRequest(input) +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACL +func (c *WAF) DeleteWebACL(input *DeleteWebACLInput) (*DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) return out, req.Send() } -// GetIPSetWithContext is the same as GetIPSet with the addition of +// DeleteWebACLWithContext is the same as DeleteWebACL with the addition of // the ability to pass a context and additional request options. // -// See GetIPSet for details on how to use this API operation. +// See DeleteWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetIPSetWithContext(ctx aws.Context, input *GetIPSetInput, opts ...request.Option) (*GetIPSetOutput, error) { - req, out := c.GetIPSetRequest(input) +func (c *WAF) DeleteWebACLWithContext(ctx aws.Context, input *DeleteWebACLInput, opts ...request.Option) (*DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRateBasedRule = "GetRateBasedRule" +const opDeleteXssMatchSet = "DeleteXssMatchSet" -// GetRateBasedRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetRateBasedRule operation. 
The "output" return +// DeleteXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteXssMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRateBasedRule for more information on using the GetRateBasedRule +// See DeleteXssMatchSet for more information on using the DeleteXssMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRateBasedRuleRequest method. -// req, resp := client.GetRateBasedRuleRequest(params) +// // Example sending a request using the DeleteXssMatchSetRequest method. +// req, resp := client.DeleteXssMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRule -func (c *WAF) GetRateBasedRuleRequest(input *GetRateBasedRuleInput) (req *request.Request, output *GetRateBasedRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSet +func (c *WAF) DeleteXssMatchSetRequest(input *DeleteXssMatchSetInput) (req *request.Request, output *DeleteXssMatchSetOutput) { op := &request.Operation{ - Name: opGetRateBasedRule, + Name: opDeleteXssMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRateBasedRuleInput{} + input = &DeleteXssMatchSetInput{} } - output = &GetRateBasedRuleOutput{} + output = &DeleteXssMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetRateBasedRule API operation for AWS WAF. +// DeleteXssMatchSet API operation for AWS WAF. // -// Returns the RateBasedRule that is specified by the RuleId that you included -// in the GetRateBasedRule request. +// Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's +// still used in any Rules or if it still contains any XssMatchTuple objects. +// +// If you just want to remove an XssMatchSet from a Rule, use UpdateRule. +// +// To permanently delete an XssMatchSet from AWS WAF, perform the following +// steps: +// +// Update the XssMatchSet to remove filters, if any. For more information, see +// UpdateXssMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteXssMatchSet request. +// +// Submit a DeleteXssMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetRateBasedRule for usage and error information. +// API operation DeleteXssMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -2676,83 +2941,106 @@ func (c *WAF) GetRateBasedRuleRequest(input *GetRateBasedRuleInput) (req *reques // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRule -func (c *WAF) GetRateBasedRule(input *GetRateBasedRuleInput) (*GetRateBasedRuleOutput, error) { - req, out := c.GetRateBasedRuleRequest(input) +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeNonEmptyEntityException "NonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSet +func (c *WAF) DeleteXssMatchSet(input *DeleteXssMatchSetInput) (*DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) return out, req.Send() } -// GetRateBasedRuleWithContext is the same as GetRateBasedRule with the addition of +// DeleteXssMatchSetWithContext is the same as DeleteXssMatchSet with the addition of // the ability to pass a context and additional request options. // -// See GetRateBasedRule for details on how to use this API operation. +// See DeleteXssMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetRateBasedRuleWithContext(ctx aws.Context, input *GetRateBasedRuleInput, opts ...request.Option) (*GetRateBasedRuleOutput, error) { - req, out := c.GetRateBasedRuleRequest(input) +func (c *WAF) DeleteXssMatchSetWithContext(ctx aws.Context, input *DeleteXssMatchSetInput, opts ...request.Option) (*DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRateBasedRuleManagedKeys = "GetRateBasedRuleManagedKeys" +const opGetByteMatchSet = "GetByteMatchSet" -// GetRateBasedRuleManagedKeysRequest generates a "aws/request.Request" representing the -// client's request for the GetRateBasedRuleManagedKeys operation. The "output" return +// GetByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetByteMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
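//
// A sketch of that two-step Request/Send pattern with custom logic injected
// before the call (assuming `svc`; the header and ID are placeholders):
//
//    req, resp := svc.GetByteMatchSetRequest(&waf.GetByteMatchSetInput{
//        ByteMatchSetId: aws.String("example-byte-match-set-id"), // placeholder ID
//    })
//    req.HTTPRequest.Header.Set("X-Example-Trace", "1") // custom header before Send
//    if err := req.Send(); err == nil {
//        fmt.Println(resp) // resp is only valid once Send returns without error
//    }
//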
// -// See GetRateBasedRuleManagedKeys for more information on using the GetRateBasedRuleManagedKeys +// See GetByteMatchSet for more information on using the GetByteMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRateBasedRuleManagedKeysRequest method. -// req, resp := client.GetRateBasedRuleManagedKeysRequest(params) +// // Example sending a request using the GetByteMatchSetRequest method. +// req, resp := client.GetByteMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeys -func (c *WAF) GetRateBasedRuleManagedKeysRequest(input *GetRateBasedRuleManagedKeysInput) (req *request.Request, output *GetRateBasedRuleManagedKeysOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSet +func (c *WAF) GetByteMatchSetRequest(input *GetByteMatchSetInput) (req *request.Request, output *GetByteMatchSetOutput) { op := &request.Operation{ - Name: opGetRateBasedRuleManagedKeys, + Name: opGetByteMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRateBasedRuleManagedKeysInput{} + input = &GetByteMatchSetInput{} } - output = &GetRateBasedRuleManagedKeysOutput{} + output = &GetByteMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetRateBasedRuleManagedKeys API operation for AWS WAF. +// GetByteMatchSet API operation for AWS WAF. // -// Returns an array of IP addresses currently being blocked by the RateBasedRule -// that is specified by the RuleId. The maximum number of managed keys that -// will be blocked is 10,000. If more than 10,000 addresses exceed the rate -// limit, the 10,000 addresses with the highest rates will be blocked. +// Returns the ByteMatchSet specified by ByteMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetRateBasedRuleManagedKeys for usage and error information. +// API operation GetByteMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -2766,207 +3054,184 @@ func (c *WAF) GetRateBasedRuleManagedKeysRequest(input *GetRateBasedRuleManagedK // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSet +func (c *WAF) GetByteMatchSet(input *GetByteMatchSetInput) (*GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) + return out, req.Send() +} + +// GetByteMatchSetWithContext is the same as GetByteMatchSet with the addition of +// the ability to pass a context and additional request options. 
// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeys -func (c *WAF) GetRateBasedRuleManagedKeys(input *GetRateBasedRuleManagedKeysInput) (*GetRateBasedRuleManagedKeysOutput, error) { - req, out := c.GetRateBasedRuleManagedKeysRequest(input) - return out, req.Send() -} - -// GetRateBasedRuleManagedKeysWithContext is the same as GetRateBasedRuleManagedKeys with the addition of -// the ability to pass a context and additional request options. -// -// See GetRateBasedRuleManagedKeys for details on how to use this API operation. +// See GetByteMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetRateBasedRuleManagedKeysWithContext(ctx aws.Context, input *GetRateBasedRuleManagedKeysInput, opts ...request.Option) (*GetRateBasedRuleManagedKeysOutput, error) { - req, out := c.GetRateBasedRuleManagedKeysRequest(input) +func (c *WAF) GetByteMatchSetWithContext(ctx aws.Context, input *GetByteMatchSetInput, opts ...request.Option) (*GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRule = "GetRule" +const opGetChangeToken = "GetChangeToken" -// GetRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetRule operation. The "output" return +// GetChangeTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeToken operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRule for more information on using the GetRule +// See GetChangeToken for more information on using the GetChangeToken // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRuleRequest method. -// req, resp := client.GetRuleRequest(params) +// // Example sending a request using the GetChangeTokenRequest method. 
+// req, resp := client.GetChangeTokenRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRule -func (c *WAF) GetRuleRequest(input *GetRuleInput) (req *request.Request, output *GetRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeToken +func (c *WAF) GetChangeTokenRequest(input *GetChangeTokenInput) (req *request.Request, output *GetChangeTokenOutput) { op := &request.Operation{ - Name: opGetRule, + Name: opGetChangeToken, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRuleInput{} + input = &GetChangeTokenInput{} } - output = &GetRuleOutput{} + output = &GetChangeTokenOutput{} req = c.newRequest(op, input, output) return } -// GetRule API operation for AWS WAF. +// GetChangeToken API operation for AWS WAF. // -// Returns the Rule that is specified by the RuleId that you included in the -// GetRule request. +// When you want to create, update, or delete AWS WAF objects, get a change +// token and include the change token in the create, update, or delete request. +// Change tokens ensure that your application doesn't submit conflicting requests +// to AWS WAF. +// +// Each create, update, or delete request must use a unique change token. If +// your application submits a GetChangeToken request and then submits a second +// GetChangeToken request before submitting a create, update, or delete request, +// the second GetChangeToken request returns the same value as the first GetChangeToken +// request. +// +// When you use a change token in a create, update, or delete request, the status +// of the change token changes to PENDING, which indicates that AWS WAF is propagating +// the change to all AWS WAF servers. Use GetChangeTokenStatus to determine +// the status of your change token. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetRule for usage and error information. +// API operation GetChangeToken for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeInvalidAccountException "InvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. -// -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRule -func (c *WAF) GetRule(input *GetRuleInput) (*GetRuleOutput, error) { - req, out := c.GetRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeToken +func (c *WAF) GetChangeToken(input *GetChangeTokenInput) (*GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) return out, req.Send() } -// GetRuleWithContext is the same as GetRule with the addition of +// GetChangeTokenWithContext is the same as GetChangeToken with the addition of // the ability to pass a context and additional request options. // -// See GetRule for details on how to use this API operation. 
+// See GetChangeToken for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetRuleWithContext(ctx aws.Context, input *GetRuleInput, opts ...request.Option) (*GetRuleOutput, error) { - req, out := c.GetRuleRequest(input) +func (c *WAF) GetChangeTokenWithContext(ctx aws.Context, input *GetChangeTokenInput, opts ...request.Option) (*GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSampledRequests = "GetSampledRequests" +const opGetChangeTokenStatus = "GetChangeTokenStatus" -// GetSampledRequestsRequest generates a "aws/request.Request" representing the -// client's request for the GetSampledRequests operation. The "output" return +// GetChangeTokenStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeTokenStatus operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSampledRequests for more information on using the GetSampledRequests +// See GetChangeTokenStatus for more information on using the GetChangeTokenStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSampledRequestsRequest method. -// req, resp := client.GetSampledRequestsRequest(params) +// // Example sending a request using the GetChangeTokenStatusRequest method. +// req, resp := client.GetChangeTokenStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequests -func (c *WAF) GetSampledRequestsRequest(input *GetSampledRequestsInput) (req *request.Request, output *GetSampledRequestsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatus +func (c *WAF) GetChangeTokenStatusRequest(input *GetChangeTokenStatusInput) (req *request.Request, output *GetChangeTokenStatusOutput) { op := &request.Operation{ - Name: opGetSampledRequests, + Name: opGetChangeTokenStatus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetSampledRequestsInput{} + input = &GetChangeTokenStatusInput{} } - output = &GetSampledRequestsOutput{} + output = &GetChangeTokenStatusOutput{} req = c.newRequest(op, input, output) return } -// GetSampledRequests API operation for AWS WAF. +// GetChangeTokenStatus API operation for AWS WAF. // -// Gets detailed information about a specified number of requests--a sample--that -// AWS WAF randomly selects from among the first 5,000 requests that your AWS -// resource received during a time range that you choose. You can specify a -// sample size of up to 500 requests, and you can specify any time range in -// the previous three hours. +// Returns the status of a ChangeToken that you got by calling GetChangeToken. 
+// ChangeTokenStatus is one of the following values: // -// GetSampledRequests returns a time range, which is usually the time range -// that you specified. However, if your resource (such as a CloudFront distribution) -// received 5,000 requests before the specified time range elapsed, GetSampledRequests -// returns an updated time range. This new time range indicates the actual period -// during which AWS WAF selected the requests in the sample. +// * PROVISIONED: You requested the change token by calling GetChangeToken, +// but you haven't used it yet in a call to create, update, or delete an +// AWS WAF object. +// +// * PENDING: AWS WAF is propagating the create, update, or delete request +// to all AWS WAF servers. +// +// * IN_SYNC: Propagation is complete. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetSampledRequests for usage and error information. +// API operation GetChangeTokenStatus for usage and error information. // // Returned Error Codes: // * ErrCodeNonexistentItemException "NonexistentItemException" @@ -2976,80 +3241,80 @@ func (c *WAF) GetSampledRequestsRequest(input *GetSampledRequestsInput) (req *re // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequests -func (c *WAF) GetSampledRequests(input *GetSampledRequestsInput) (*GetSampledRequestsOutput, error) { - req, out := c.GetSampledRequestsRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatus +func (c *WAF) GetChangeTokenStatus(input *GetChangeTokenStatusInput) (*GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) return out, req.Send() } -// GetSampledRequestsWithContext is the same as GetSampledRequests with the addition of +// GetChangeTokenStatusWithContext is the same as GetChangeTokenStatus with the addition of // the ability to pass a context and additional request options. // -// See GetSampledRequests for details on how to use this API operation. +// See GetChangeTokenStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetSampledRequestsWithContext(ctx aws.Context, input *GetSampledRequestsInput, opts ...request.Option) (*GetSampledRequestsOutput, error) { - req, out := c.GetSampledRequestsRequest(input) +func (c *WAF) GetChangeTokenStatusWithContext(ctx aws.Context, input *GetChangeTokenStatusInput, opts ...request.Option) (*GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSizeConstraintSet = "GetSizeConstraintSet" +const opGetGeoMatchSet = "GetGeoMatchSet" -// GetSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the GetSizeConstraintSet operation. The "output" return +// GetGeoMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetGeoMatchSet operation. 
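// Editor's sketch of the change-token workflow documented above: request a
// token, use it in a mutating call, then poll GetChangeTokenStatus until
// propagation completes. The mutating call itself is elided, and the
// polling interval is an illustrative assumption.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	// Each create, update, or delete request must use a unique change token.
	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		panic(err)
	}

	// ... pass token.ChangeToken to a create/update/delete call here ...

	// The token moves to PENDING while AWS WAF propagates the change; poll
	// until the service reports that propagation is complete.
	for {
		status, err := client.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
			ChangeToken: token.ChangeToken,
		})
		if err != nil {
			panic(err)
		}
		if aws.StringValue(status.ChangeTokenStatus) != "PENDING" {
			fmt.Println("final status:", aws.StringValue(status.ChangeTokenStatus))
			break
		}
		time.Sleep(5 * time.Second)
	}
}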
The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSizeConstraintSet for more information on using the GetSizeConstraintSet +// See GetGeoMatchSet for more information on using the GetGeoMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSizeConstraintSetRequest method. -// req, resp := client.GetSizeConstraintSetRequest(params) +// // Example sending a request using the GetGeoMatchSetRequest method. +// req, resp := client.GetGeoMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSet -func (c *WAF) GetSizeConstraintSetRequest(input *GetSizeConstraintSetInput) (req *request.Request, output *GetSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetGeoMatchSet +func (c *WAF) GetGeoMatchSetRequest(input *GetGeoMatchSetInput) (req *request.Request, output *GetGeoMatchSetOutput) { op := &request.Operation{ - Name: opGetSizeConstraintSet, + Name: opGetGeoMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetSizeConstraintSetInput{} + input = &GetGeoMatchSetInput{} } - output = &GetSizeConstraintSetOutput{} + output = &GetGeoMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetSizeConstraintSet API operation for AWS WAF. +// GetGeoMatchSet API operation for AWS WAF. // -// Returns the SizeConstraintSet specified by SizeConstraintSetId. +// Returns the GeoMatchSet that is specified by GeoMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetSizeConstraintSet for usage and error information. +// API operation GetGeoMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3063,80 +3328,80 @@ func (c *WAF) GetSizeConstraintSetRequest(input *GetSizeConstraintSetInput) (req // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSet -func (c *WAF) GetSizeConstraintSet(input *GetSizeConstraintSetInput) (*GetSizeConstraintSetOutput, error) { - req, out := c.GetSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetGeoMatchSet +func (c *WAF) GetGeoMatchSet(input *GetGeoMatchSetInput) (*GetGeoMatchSetOutput, error) { + req, out := c.GetGeoMatchSetRequest(input) return out, req.Send() } -// GetSizeConstraintSetWithContext is the same as GetSizeConstraintSet with the addition of +// GetGeoMatchSetWithContext is the same as GetGeoMatchSet with the addition of // the ability to pass a context and additional request options. // -// See GetSizeConstraintSet for details on how to use this API operation. 
+// See GetGeoMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetSizeConstraintSetWithContext(ctx aws.Context, input *GetSizeConstraintSetInput, opts ...request.Option) (*GetSizeConstraintSetOutput, error) { - req, out := c.GetSizeConstraintSetRequest(input) +func (c *WAF) GetGeoMatchSetWithContext(ctx aws.Context, input *GetGeoMatchSetInput, opts ...request.Option) (*GetGeoMatchSetOutput, error) { + req, out := c.GetGeoMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" +const opGetIPSet = "GetIPSet" -// GetSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the GetSqlInjectionMatchSet operation. The "output" return +// GetIPSetRequest generates a "aws/request.Request" representing the +// client's request for the GetIPSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSqlInjectionMatchSet for more information on using the GetSqlInjectionMatchSet +// See GetIPSet for more information on using the GetIPSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSqlInjectionMatchSetRequest method. -// req, resp := client.GetSqlInjectionMatchSetRequest(params) +// // Example sending a request using the GetIPSetRequest method. +// req, resp := client.GetIPSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSet -func (c *WAF) GetSqlInjectionMatchSetRequest(input *GetSqlInjectionMatchSetInput) (req *request.Request, output *GetSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSet +func (c *WAF) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, output *GetIPSetOutput) { op := &request.Operation{ - Name: opGetSqlInjectionMatchSet, + Name: opGetIPSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetSqlInjectionMatchSetInput{} + input = &GetIPSetInput{} } - output = &GetSqlInjectionMatchSetOutput{} + output = &GetIPSetOutput{} req = c.newRequest(op, input, output) return } -// GetSqlInjectionMatchSet API operation for AWS WAF. +// GetIPSet API operation for AWS WAF. // -// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. +// Returns the IPSet that is specified by IPSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetSqlInjectionMatchSet for usage and error information. +// API operation GetIPSet for usage and error information. 
// // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3150,80 +3415,81 @@ func (c *WAF) GetSqlInjectionMatchSetRequest(input *GetSqlInjectionMatchSetInput // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSet -func (c *WAF) GetSqlInjectionMatchSet(input *GetSqlInjectionMatchSetInput) (*GetSqlInjectionMatchSetOutput, error) { - req, out := c.GetSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSet +func (c *WAF) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) return out, req.Send() } -// GetSqlInjectionMatchSetWithContext is the same as GetSqlInjectionMatchSet with the addition of +// GetIPSetWithContext is the same as GetIPSet with the addition of // the ability to pass a context and additional request options. // -// See GetSqlInjectionMatchSet for details on how to use this API operation. +// See GetIPSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetSqlInjectionMatchSetWithContext(ctx aws.Context, input *GetSqlInjectionMatchSetInput, opts ...request.Option) (*GetSqlInjectionMatchSetOutput, error) { - req, out := c.GetSqlInjectionMatchSetRequest(input) +func (c *WAF) GetIPSetWithContext(ctx aws.Context, input *GetIPSetInput, opts ...request.Option) (*GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetWebACL = "GetWebACL" +const opGetRateBasedRule = "GetRateBasedRule" -// GetWebACLRequest generates a "aws/request.Request" representing the -// client's request for the GetWebACL operation. The "output" return +// GetRateBasedRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetRateBasedRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetWebACL for more information on using the GetWebACL +// See GetRateBasedRule for more information on using the GetRateBasedRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetWebACLRequest method. -// req, resp := client.GetWebACLRequest(params) +// // Example sending a request using the GetRateBasedRuleRequest method. 
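// Editor's sketch of the *WithContext variants documented above: the
// context must be non-nil and is used for request cancellation. The
// ten-second timeout and the IPSetId are illustrative assumptions.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	// Cancel the request if it has not completed within ten seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := client.GetIPSetWithContext(ctx, &waf.GetIPSetInput{
		IPSetId: aws.String("example-ip-set-id"), // hypothetical ID
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.IPSet)
}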
+// req, resp := client.GetRateBasedRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACL -func (c *WAF) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, output *GetWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRule +func (c *WAF) GetRateBasedRuleRequest(input *GetRateBasedRuleInput) (req *request.Request, output *GetRateBasedRuleOutput) { op := &request.Operation{ - Name: opGetWebACL, + Name: opGetRateBasedRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetWebACLInput{} + input = &GetRateBasedRuleInput{} } - output = &GetWebACLOutput{} + output = &GetRateBasedRuleOutput{} req = c.newRequest(op, input, output) return } -// GetWebACL API operation for AWS WAF. +// GetRateBasedRule API operation for AWS WAF. // -// Returns the WebACL that is specified by WebACLId. +// Returns the RateBasedRule that is specified by the RuleId that you included +// in the GetRateBasedRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetWebACL for usage and error information. +// API operation GetRateBasedRule for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3237,80 +3503,83 @@ func (c *WAF) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, out // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACL -func (c *WAF) GetWebACL(input *GetWebACLInput) (*GetWebACLOutput, error) { - req, out := c.GetWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRule +func (c *WAF) GetRateBasedRule(input *GetRateBasedRuleInput) (*GetRateBasedRuleOutput, error) { + req, out := c.GetRateBasedRuleRequest(input) return out, req.Send() } -// GetWebACLWithContext is the same as GetWebACL with the addition of +// GetRateBasedRuleWithContext is the same as GetRateBasedRule with the addition of // the ability to pass a context and additional request options. // -// See GetWebACL for details on how to use this API operation. +// See GetRateBasedRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetWebACLWithContext(ctx aws.Context, input *GetWebACLInput, opts ...request.Option) (*GetWebACLOutput, error) { - req, out := c.GetWebACLRequest(input) +func (c *WAF) GetRateBasedRuleWithContext(ctx aws.Context, input *GetRateBasedRuleInput, opts ...request.Option) (*GetRateBasedRuleOutput, error) { + req, out := c.GetRateBasedRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetXssMatchSet = "GetXssMatchSet" +const opGetRateBasedRuleManagedKeys = "GetRateBasedRuleManagedKeys" -// GetXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the GetXssMatchSet operation. The "output" return +// GetRateBasedRuleManagedKeysRequest generates a "aws/request.Request" representing the +// client's request for the GetRateBasedRuleManagedKeys operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetXssMatchSet for more information on using the GetXssMatchSet +// See GetRateBasedRuleManagedKeys for more information on using the GetRateBasedRuleManagedKeys // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetXssMatchSetRequest method. -// req, resp := client.GetXssMatchSetRequest(params) +// // Example sending a request using the GetRateBasedRuleManagedKeysRequest method. +// req, resp := client.GetRateBasedRuleManagedKeysRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSet -func (c *WAF) GetXssMatchSetRequest(input *GetXssMatchSetInput) (req *request.Request, output *GetXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeys +func (c *WAF) GetRateBasedRuleManagedKeysRequest(input *GetRateBasedRuleManagedKeysInput) (req *request.Request, output *GetRateBasedRuleManagedKeysOutput) { op := &request.Operation{ - Name: opGetXssMatchSet, + Name: opGetRateBasedRuleManagedKeys, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetXssMatchSetInput{} + input = &GetRateBasedRuleManagedKeysInput{} } - output = &GetXssMatchSetOutput{} + output = &GetRateBasedRuleManagedKeysOutput{} req = c.newRequest(op, input, output) return } -// GetXssMatchSet API operation for AWS WAF. +// GetRateBasedRuleManagedKeys API operation for AWS WAF. // -// Returns the XssMatchSet that is specified by XssMatchSetId. +// Returns an array of IP addresses currently being blocked by the RateBasedRule +// that is specified by the RuleId. The maximum number of managed keys that +// will be blocked is 10,000. If more than 10,000 addresses exceed the rate +// limit, the 10,000 addresses with the highest rates will be blocked. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation GetXssMatchSet for usage and error information. +// API operation GetRateBasedRuleManagedKeys for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3324,80 +3593,109 @@ func (c *WAF) GetXssMatchSetRequest(input *GetXssMatchSetInput) (req *request.Re // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSet -func (c *WAF) GetXssMatchSet(input *GetXssMatchSetInput) (*GetXssMatchSetOutput, error) { - req, out := c.GetXssMatchSetRequest(input) +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeys +func (c *WAF) GetRateBasedRuleManagedKeys(input *GetRateBasedRuleManagedKeysInput) (*GetRateBasedRuleManagedKeysOutput, error) { + req, out := c.GetRateBasedRuleManagedKeysRequest(input) return out, req.Send() } -// GetXssMatchSetWithContext is the same as GetXssMatchSet with the addition of +// GetRateBasedRuleManagedKeysWithContext is the same as GetRateBasedRuleManagedKeys with the addition of // the ability to pass a context and additional request options. // -// See GetXssMatchSet for details on how to use this API operation. +// See GetRateBasedRuleManagedKeys for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) GetXssMatchSetWithContext(ctx aws.Context, input *GetXssMatchSetInput, opts ...request.Option) (*GetXssMatchSetOutput, error) { - req, out := c.GetXssMatchSetRequest(input) +func (c *WAF) GetRateBasedRuleManagedKeysWithContext(ctx aws.Context, input *GetRateBasedRuleManagedKeysInput, opts ...request.Option) (*GetRateBasedRuleManagedKeysOutput, error) { + req, out := c.GetRateBasedRuleManagedKeysRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListByteMatchSets = "ListByteMatchSets" +const opGetRegexMatchSet = "GetRegexMatchSet" -// ListByteMatchSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListByteMatchSets operation. The "output" return +// GetRegexMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetRegexMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
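// Editor's sketch of GetRateBasedRuleManagedKeys as documented above:
// list the addresses currently blocked by a RateBasedRule (at most 10,000
// keys are blocked). The RuleId is an illustrative assumption.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.GetRateBasedRuleManagedKeys(&waf.GetRateBasedRuleManagedKeysInput{
		RuleId: aws.String("example-rate-based-rule-id"), // hypothetical ID
	})
	if err != nil {
		panic(err)
	}
	for _, key := range out.ManagedKeys { // addresses currently over the rate limit
		fmt.Println(aws.StringValue(key))
	}
}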
// -// See ListByteMatchSets for more information on using the ListByteMatchSets +// See GetRegexMatchSet for more information on using the GetRegexMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListByteMatchSetsRequest method. -// req, resp := client.ListByteMatchSetsRequest(params) +// // Example sending a request using the GetRegexMatchSetRequest method. +// req, resp := client.GetRegexMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSets -func (c *WAF) ListByteMatchSetsRequest(input *ListByteMatchSetsInput) (req *request.Request, output *ListByteMatchSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexMatchSet +func (c *WAF) GetRegexMatchSetRequest(input *GetRegexMatchSetInput) (req *request.Request, output *GetRegexMatchSetOutput) { op := &request.Operation{ - Name: opListByteMatchSets, + Name: opGetRegexMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListByteMatchSetsInput{} + input = &GetRegexMatchSetInput{} } - output = &ListByteMatchSetsOutput{} + output = &GetRegexMatchSetOutput{} req = c.newRequest(op, input, output) return } -// ListByteMatchSets API operation for AWS WAF. +// GetRegexMatchSet API operation for AWS WAF. // -// Returns an array of ByteMatchSetSummary objects. +// Returns the RegexMatchSet specified by RegexMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListByteMatchSets for usage and error information. +// API operation GetRegexMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3408,80 +3706,83 @@ func (c *WAF) ListByteMatchSetsRequest(input *ListByteMatchSetsInput) (req *requ // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSets -func (c *WAF) ListByteMatchSets(input *ListByteMatchSetsInput) (*ListByteMatchSetsOutput, error) { - req, out := c.ListByteMatchSetsRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexMatchSet +func (c *WAF) GetRegexMatchSet(input *GetRegexMatchSetInput) (*GetRegexMatchSetOutput, error) { + req, out := c.GetRegexMatchSetRequest(input) return out, req.Send() } -// ListByteMatchSetsWithContext is the same as ListByteMatchSets with the addition of +// GetRegexMatchSetWithContext is the same as GetRegexMatchSet with the addition of // the ability to pass a context and additional request options. // -// See ListByteMatchSets for details on how to use this API operation. +// See GetRegexMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListByteMatchSetsWithContext(ctx aws.Context, input *ListByteMatchSetsInput, opts ...request.Option) (*ListByteMatchSetsOutput, error) { - req, out := c.ListByteMatchSetsRequest(input) +func (c *WAF) GetRegexMatchSetWithContext(ctx aws.Context, input *GetRegexMatchSetInput, opts ...request.Option) (*GetRegexMatchSetOutput, error) { + req, out := c.GetRegexMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListIPSets = "ListIPSets" +const opGetRegexPatternSet = "GetRegexPatternSet" -// ListIPSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListIPSets operation. The "output" return +// GetRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the GetRegexPatternSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListIPSets for more information on using the ListIPSets +// See GetRegexPatternSet for more information on using the GetRegexPatternSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListIPSetsRequest method. -// req, resp := client.ListIPSetsRequest(params) +// // Example sending a request using the GetRegexPatternSetRequest method. +// req, resp := client.GetRegexPatternSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSets -func (c *WAF) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, output *ListIPSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexPatternSet +func (c *WAF) GetRegexPatternSetRequest(input *GetRegexPatternSetInput) (req *request.Request, output *GetRegexPatternSetOutput) { op := &request.Operation{ - Name: opListIPSets, + Name: opGetRegexPatternSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListIPSetsInput{} + input = &GetRegexPatternSetInput{} } - output = &ListIPSetsOutput{} + output = &GetRegexPatternSetOutput{} req = c.newRequest(op, input, output) return } -// ListIPSets API operation for AWS WAF. +// GetRegexPatternSet API operation for AWS WAF. // -// Returns an array of IPSetSummary objects in the response. +// Returns the RegexPatternSet specified by RegexPatternSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListIPSets for usage and error information. +// API operation GetRegexPatternSet for usage and error information. 
// // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3492,80 +3793,84 @@ func (c *WAF) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, o // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSets -func (c *WAF) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { - req, out := c.ListIPSetsRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexPatternSet +func (c *WAF) GetRegexPatternSet(input *GetRegexPatternSetInput) (*GetRegexPatternSetOutput, error) { + req, out := c.GetRegexPatternSetRequest(input) return out, req.Send() } -// ListIPSetsWithContext is the same as ListIPSets with the addition of +// GetRegexPatternSetWithContext is the same as GetRegexPatternSet with the addition of // the ability to pass a context and additional request options. // -// See ListIPSets for details on how to use this API operation. +// See GetRegexPatternSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListIPSetsWithContext(ctx aws.Context, input *ListIPSetsInput, opts ...request.Option) (*ListIPSetsOutput, error) { - req, out := c.ListIPSetsRequest(input) +func (c *WAF) GetRegexPatternSetWithContext(ctx aws.Context, input *GetRegexPatternSetInput, opts ...request.Option) (*GetRegexPatternSetOutput, error) { + req, out := c.GetRegexPatternSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListRateBasedRules = "ListRateBasedRules" +const opGetRule = "GetRule" -// ListRateBasedRulesRequest generates a "aws/request.Request" representing the -// client's request for the ListRateBasedRules operation. The "output" return +// GetRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRateBasedRules for more information on using the ListRateBasedRules +// See GetRule for more information on using the GetRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRateBasedRulesRequest method. -// req, resp := client.ListRateBasedRulesRequest(params) +// // Example sending a request using the GetRuleRequest method. 
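// Editor's sketch tying the two regex lookups above together: fetch a
// RegexMatchSet, then resolve the RegexPatternSet each match tuple points
// at. The IDs are illustrative, and the tuple/pattern field names are
// assumptions based on the corresponding shapes in this package.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	match, err := client.GetRegexMatchSet(&waf.GetRegexMatchSetInput{
		RegexMatchSetId: aws.String("example-regex-match-set-id"), // hypothetical ID
	})
	if err != nil {
		panic(err)
	}
	for _, tuple := range match.RegexMatchSet.RegexMatchTuples {
		pattern, err := client.GetRegexPatternSet(&waf.GetRegexPatternSetInput{
			RegexPatternSetId: tuple.RegexPatternSetId,
		})
		if err != nil {
			panic(err)
		}
		for _, s := range pattern.RegexPatternSet.RegexPatternStrings {
			fmt.Println(aws.StringValue(s)) // the raw regex pattern text
		}
	}
}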
+// req, resp := client.GetRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRules -func (c *WAF) ListRateBasedRulesRequest(input *ListRateBasedRulesInput) (req *request.Request, output *ListRateBasedRulesOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRule +func (c *WAF) GetRuleRequest(input *GetRuleInput) (req *request.Request, output *GetRuleOutput) { op := &request.Operation{ - Name: opListRateBasedRules, + Name: opGetRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListRateBasedRulesInput{} + input = &GetRuleInput{} } - output = &ListRateBasedRulesOutput{} + output = &GetRuleOutput{} req = c.newRequest(op, input, output) return } -// ListRateBasedRules API operation for AWS WAF. +// GetRule API operation for AWS WAF. // -// Returns an array of RuleSummary objects. +// Returns the Rule that is specified by the RuleId that you included in the +// GetRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListRateBasedRules for usage and error information. +// API operation GetRule for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3576,164 +3881,176 @@ func (c *WAF) ListRateBasedRulesRequest(input *ListRateBasedRulesInput) (req *re // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRules -func (c *WAF) ListRateBasedRules(input *ListRateBasedRulesInput) (*ListRateBasedRulesOutput, error) { - req, out := c.ListRateBasedRulesRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRule +func (c *WAF) GetRule(input *GetRuleInput) (*GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) return out, req.Send() } -// ListRateBasedRulesWithContext is the same as ListRateBasedRules with the addition of +// GetRuleWithContext is the same as GetRule with the addition of // the ability to pass a context and additional request options. // -// See ListRateBasedRules for details on how to use this API operation. +// See GetRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListRateBasedRulesWithContext(ctx aws.Context, input *ListRateBasedRulesInput, opts ...request.Option) (*ListRateBasedRulesOutput, error) { - req, out := c.ListRateBasedRulesRequest(input) +func (c *WAF) GetRuleWithContext(ctx aws.Context, input *GetRuleInput, opts ...request.Option) (*GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opListRules = "ListRules" +const opGetSampledRequests = "GetSampledRequests" -// ListRulesRequest generates a "aws/request.Request" representing the -// client's request for the ListRules operation. The "output" return +// GetSampledRequestsRequest generates a "aws/request.Request" representing the +// client's request for the GetSampledRequests operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRules for more information on using the ListRules +// See GetSampledRequests for more information on using the GetSampledRequests // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRulesRequest method. -// req, resp := client.ListRulesRequest(params) +// // Example sending a request using the GetSampledRequestsRequest method. +// req, resp := client.GetSampledRequestsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRules -func (c *WAF) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequests +func (c *WAF) GetSampledRequestsRequest(input *GetSampledRequestsInput) (req *request.Request, output *GetSampledRequestsOutput) { op := &request.Operation{ - Name: opListRules, + Name: opGetSampledRequests, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListRulesInput{} + input = &GetSampledRequestsInput{} } - output = &ListRulesOutput{} + output = &GetSampledRequestsOutput{} req = c.newRequest(op, input, output) return } -// ListRules API operation for AWS WAF. +// GetSampledRequests API operation for AWS WAF. // -// Returns an array of RuleSummary objects. +// Gets detailed information about a specified number of requests--a sample--that +// AWS WAF randomly selects from among the first 5,000 requests that your AWS +// resource received during a time range that you choose. You can specify a +// sample size of up to 500 requests, and you can specify any time range in +// the previous three hours. +// +// GetSampledRequests returns a time range, which is usually the time range +// that you specified. However, if your resource (such as a CloudFront distribution) +// received 5,000 requests before the specified time range elapsed, GetSampledRequests +// returns an updated time range. This new time range indicates the actual period +// during which AWS WAF selected the requests in the sample. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListRules for usage and error information. +// API operation GetSampledRequests for usage and error information. // // Returned Error Codes: +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. 
+// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeInvalidAccountException "InvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRules -func (c *WAF) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { - req, out := c.ListRulesRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequests +func (c *WAF) GetSampledRequests(input *GetSampledRequestsInput) (*GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) return out, req.Send() } -// ListRulesWithContext is the same as ListRules with the addition of +// GetSampledRequestsWithContext is the same as GetSampledRequests with the addition of // the ability to pass a context and additional request options. // -// See ListRules for details on how to use this API operation. +// See GetSampledRequests for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListRulesWithContext(ctx aws.Context, input *ListRulesInput, opts ...request.Option) (*ListRulesOutput, error) { - req, out := c.ListRulesRequest(input) +func (c *WAF) GetSampledRequestsWithContext(ctx aws.Context, input *GetSampledRequestsInput, opts ...request.Option) (*GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListSizeConstraintSets = "ListSizeConstraintSets" +const opGetSizeConstraintSet = "GetSizeConstraintSet" -// ListSizeConstraintSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListSizeConstraintSets operation. The "output" return +// GetSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSizeConstraintSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListSizeConstraintSets for more information on using the ListSizeConstraintSets +// See GetSizeConstraintSet for more information on using the GetSizeConstraintSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListSizeConstraintSetsRequest method. -// req, resp := client.ListSizeConstraintSetsRequest(params) +// // Example sending a request using the GetSizeConstraintSetRequest method. 
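// Editor's sketch of GetSampledRequests as documented above: up to 500
// sampled requests drawn from the first 5,000 received during a window
// inside the previous three hours. The WebAclId and RuleId values are
// illustrative assumptions.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	end := time.Now()
	out, err := client.GetSampledRequests(&waf.GetSampledRequestsInput{
		WebAclId: aws.String("example-web-acl-id"), // hypothetical ID
		RuleId:   aws.String("example-rule-id"),    // hypothetical ID
		MaxItems: aws.Int64(500),
		TimeWindow: &waf.TimeWindow{
			StartTime: aws.Time(end.Add(-1 * time.Hour)),
			EndTime:   aws.Time(end),
		},
	})
	if err != nil {
		panic(err)
	}
	// AWS WAF may narrow the window if 5,000 requests arrived early; the
	// returned TimeWindow reports the period actually sampled.
	fmt.Println("actual window:", out.TimeWindow)
	for _, s := range out.SampledRequests {
		fmt.Println(aws.StringValue(s.Request.ClientIP), aws.StringValue(s.Request.URI))
	}
}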
+// req, resp := client.GetSizeConstraintSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSets -func (c *WAF) ListSizeConstraintSetsRequest(input *ListSizeConstraintSetsInput) (req *request.Request, output *ListSizeConstraintSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSet +func (c *WAF) GetSizeConstraintSetRequest(input *GetSizeConstraintSetInput) (req *request.Request, output *GetSizeConstraintSetOutput) { op := &request.Operation{ - Name: opListSizeConstraintSets, + Name: opGetSizeConstraintSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListSizeConstraintSetsInput{} + input = &GetSizeConstraintSetInput{} } - output = &ListSizeConstraintSetsOutput{} + output = &GetSizeConstraintSetOutput{} req = c.newRequest(op, input, output) return } -// ListSizeConstraintSets API operation for AWS WAF. +// GetSizeConstraintSet API operation for AWS WAF. // -// Returns an array of SizeConstraintSetSummary objects. +// Returns the SizeConstraintSet specified by SizeConstraintSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListSizeConstraintSets for usage and error information. +// API operation GetSizeConstraintSet for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3744,80 +4061,83 @@ func (c *WAF) ListSizeConstraintSetsRequest(input *ListSizeConstraintSetsInput) // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSets -func (c *WAF) ListSizeConstraintSets(input *ListSizeConstraintSetsInput) (*ListSizeConstraintSetsOutput, error) { - req, out := c.ListSizeConstraintSetsRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSet +func (c *WAF) GetSizeConstraintSet(input *GetSizeConstraintSetInput) (*GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) return out, req.Send() } -// ListSizeConstraintSetsWithContext is the same as ListSizeConstraintSets with the addition of +// GetSizeConstraintSetWithContext is the same as GetSizeConstraintSet with the addition of // the ability to pass a context and additional request options. // -// See ListSizeConstraintSets for details on how to use this API operation. +// See GetSizeConstraintSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *WAF) ListSizeConstraintSetsWithContext(ctx aws.Context, input *ListSizeConstraintSetsInput, opts ...request.Option) (*ListSizeConstraintSetsOutput, error) { - req, out := c.ListSizeConstraintSetsRequest(input) +func (c *WAF) GetSizeConstraintSetWithContext(ctx aws.Context, input *GetSizeConstraintSetInput, opts ...request.Option) (*GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" +const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" -// ListSqlInjectionMatchSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListSqlInjectionMatchSets operation. The "output" return +// GetSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSqlInjectionMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListSqlInjectionMatchSets for more information on using the ListSqlInjectionMatchSets +// See GetSqlInjectionMatchSet for more information on using the GetSqlInjectionMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListSqlInjectionMatchSetsRequest method. -// req, resp := client.ListSqlInjectionMatchSetsRequest(params) +// // Example sending a request using the GetSqlInjectionMatchSetRequest method. +// req, resp := client.GetSqlInjectionMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSets -func (c *WAF) ListSqlInjectionMatchSetsRequest(input *ListSqlInjectionMatchSetsInput) (req *request.Request, output *ListSqlInjectionMatchSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSet +func (c *WAF) GetSqlInjectionMatchSetRequest(input *GetSqlInjectionMatchSetInput) (req *request.Request, output *GetSqlInjectionMatchSetOutput) { op := &request.Operation{ - Name: opListSqlInjectionMatchSets, + Name: opGetSqlInjectionMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListSqlInjectionMatchSetsInput{} + input = &GetSqlInjectionMatchSetInput{} } - output = &ListSqlInjectionMatchSetsOutput{} + output = &GetSqlInjectionMatchSetOutput{} req = c.newRequest(op, input, output) return } -// ListSqlInjectionMatchSets API operation for AWS WAF. +// GetSqlInjectionMatchSet API operation for AWS WAF. // -// Returns an array of SqlInjectionMatchSet objects. +// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListSqlInjectionMatchSets for usage and error information. +// API operation GetSqlInjectionMatchSet for usage and error information. 
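// A sketch of the runtime type assertion on awserr.Error that the docs above
// recommend, assuming a placeholder SqlInjectionMatchSetId: the
// NonexistentItemException code is handled separately from other failures.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.GetSqlInjectionMatchSet(&waf.GetSqlInjectionMatchSetInput{
		SqlInjectionMatchSetId: aws.String("example-sql-injection-match-set-id"),
	})
	if err != nil {
		// Use Code and Message from awserr.Error to branch on service errors.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == waf.ErrCodeNonexistentItemException {
			fmt.Println("match set does not exist:", aerr.Message())
			return
		}
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(out.SqlInjectionMatchSet)
}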
// // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3828,80 +4148,83 @@ func (c *WAF) ListSqlInjectionMatchSetsRequest(input *ListSqlInjectionMatchSetsI // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSets -func (c *WAF) ListSqlInjectionMatchSets(input *ListSqlInjectionMatchSetsInput) (*ListSqlInjectionMatchSetsOutput, error) { - req, out := c.ListSqlInjectionMatchSetsRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSet +func (c *WAF) GetSqlInjectionMatchSet(input *GetSqlInjectionMatchSetInput) (*GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) return out, req.Send() } -// ListSqlInjectionMatchSetsWithContext is the same as ListSqlInjectionMatchSets with the addition of +// GetSqlInjectionMatchSetWithContext is the same as GetSqlInjectionMatchSet with the addition of // the ability to pass a context and additional request options. // -// See ListSqlInjectionMatchSets for details on how to use this API operation. +// See GetSqlInjectionMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListSqlInjectionMatchSetsWithContext(ctx aws.Context, input *ListSqlInjectionMatchSetsInput, opts ...request.Option) (*ListSqlInjectionMatchSetsOutput, error) { - req, out := c.ListSqlInjectionMatchSetsRequest(input) +func (c *WAF) GetSqlInjectionMatchSetWithContext(ctx aws.Context, input *GetSqlInjectionMatchSetInput, opts ...request.Option) (*GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListWebACLs = "ListWebACLs" +const opGetWebACL = "GetWebACL" -// ListWebACLsRequest generates a "aws/request.Request" representing the -// client's request for the ListWebACLs operation. The "output" return +// GetWebACLRequest generates a "aws/request.Request" representing the +// client's request for the GetWebACL operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListWebACLs for more information on using the ListWebACLs +// See GetWebACL for more information on using the GetWebACL // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListWebACLsRequest method. -// req, resp := client.ListWebACLsRequest(params) +// // Example sending a request using the GetWebACLRequest method. 
+// req, resp := client.GetWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLs -func (c *WAF) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Request, output *ListWebACLsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACL +func (c *WAF) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, output *GetWebACLOutput) { op := &request.Operation{ - Name: opListWebACLs, + Name: opGetWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListWebACLsInput{} + input = &GetWebACLInput{} } - output = &ListWebACLsOutput{} + output = &GetWebACLOutput{} req = c.newRequest(op, input, output) return } -// ListWebACLs API operation for AWS WAF. +// GetWebACL API operation for AWS WAF. // -// Returns an array of WebACLSummary objects in the response. +// Returns the WebACL that is specified by WebACLId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListWebACLs for usage and error information. +// API operation GetWebACL for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3912,80 +4235,83 @@ func (c *WAF) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Request, // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLs -func (c *WAF) ListWebACLs(input *ListWebACLsInput) (*ListWebACLsOutput, error) { - req, out := c.ListWebACLsRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACL +func (c *WAF) GetWebACL(input *GetWebACLInput) (*GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) return out, req.Send() } -// ListWebACLsWithContext is the same as ListWebACLs with the addition of +// GetWebACLWithContext is the same as GetWebACL with the addition of // the ability to pass a context and additional request options. // -// See ListWebACLs for details on how to use this API operation. +// See GetWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListWebACLsWithContext(ctx aws.Context, input *ListWebACLsInput, opts ...request.Option) (*ListWebACLsOutput, error) { - req, out := c.ListWebACLsRequest(input) +func (c *WAF) GetWebACLWithContext(ctx aws.Context, input *GetWebACLInput, opts ...request.Option) (*GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListXssMatchSets = "ListXssMatchSets" +const opGetXssMatchSet = "GetXssMatchSet" -// ListXssMatchSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListXssMatchSets operation. 
The "output" return +// GetXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetXssMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListXssMatchSets for more information on using the ListXssMatchSets +// See GetXssMatchSet for more information on using the GetXssMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListXssMatchSetsRequest method. -// req, resp := client.ListXssMatchSetsRequest(params) +// // Example sending a request using the GetXssMatchSetRequest method. +// req, resp := client.GetXssMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSets -func (c *WAF) ListXssMatchSetsRequest(input *ListXssMatchSetsInput) (req *request.Request, output *ListXssMatchSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSet +func (c *WAF) GetXssMatchSetRequest(input *GetXssMatchSetInput) (req *request.Request, output *GetXssMatchSetOutput) { op := &request.Operation{ - Name: opListXssMatchSets, + Name: opGetXssMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListXssMatchSetsInput{} + input = &GetXssMatchSetInput{} } - output = &ListXssMatchSetsOutput{} + output = &GetXssMatchSetOutput{} req = c.newRequest(op, input, output) return } -// ListXssMatchSets API operation for AWS WAF. +// GetXssMatchSet API operation for AWS WAF. // -// Returns an array of XssMatchSet objects. +// Returns the XssMatchSet that is specified by XssMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation ListXssMatchSets for usage and error information. +// API operation GetXssMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -3996,116 +4322,83 @@ func (c *WAF) ListXssMatchSetsRequest(input *ListXssMatchSetsInput) (req *reques // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSets -func (c *WAF) ListXssMatchSets(input *ListXssMatchSetsInput) (*ListXssMatchSetsOutput, error) { - req, out := c.ListXssMatchSetsRequest(input) +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. 
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSet +func (c *WAF) GetXssMatchSet(input *GetXssMatchSetInput) (*GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) return out, req.Send() } -// ListXssMatchSetsWithContext is the same as ListXssMatchSets with the addition of +// GetXssMatchSetWithContext is the same as GetXssMatchSet with the addition of // the ability to pass a context and additional request options. // -// See ListXssMatchSets for details on how to use this API operation. +// See GetXssMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) ListXssMatchSetsWithContext(ctx aws.Context, input *ListXssMatchSetsInput, opts ...request.Option) (*ListXssMatchSetsOutput, error) { - req, out := c.ListXssMatchSetsRequest(input) +func (c *WAF) GetXssMatchSetWithContext(ctx aws.Context, input *GetXssMatchSetInput, opts ...request.Option) (*GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateByteMatchSet = "UpdateByteMatchSet" +const opListByteMatchSets = "ListByteMatchSets" -// UpdateByteMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateByteMatchSet operation. The "output" return +// ListByteMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListByteMatchSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateByteMatchSet for more information on using the UpdateByteMatchSet +// See ListByteMatchSets for more information on using the ListByteMatchSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateByteMatchSetRequest method. -// req, resp := client.UpdateByteMatchSetRequest(params) +// // Example sending a request using the ListByteMatchSetsRequest method. 
+// req, resp := client.ListByteMatchSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSet -func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *request.Request, output *UpdateByteMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSets +func (c *WAF) ListByteMatchSetsRequest(input *ListByteMatchSetsInput) (req *request.Request, output *ListByteMatchSetsOutput) { op := &request.Operation{ - Name: opUpdateByteMatchSet, + Name: opListByteMatchSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateByteMatchSetInput{} + input = &ListByteMatchSetsInput{} } - output = &UpdateByteMatchSetOutput{} + output = &ListByteMatchSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateByteMatchSet API operation for AWS WAF. -// -// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For -// each ByteMatchTuple object, you specify the following values: -// -// * Whether to insert or delete the object from the array. If you want to -// change a ByteMatchSetUpdate object, you delete the existing object and -// add a new one. -// -// * The part of a web request that you want AWS WAF to inspect, such as -// a query string or the value of the User-Agent header. -// -// * The bytes (typically a string that corresponds with ASCII characters) -// that you want AWS WAF to look for. For more information, including how -// you specify the values for the AWS WAF API and the AWS CLI or SDKs, see -// TargetString in the ByteMatchTuple data type. -// -// * Where to look, such as at the beginning or the end of a query string. -// -// * Whether to perform any conversions on the request, such as converting -// it to lowercase, before inspecting it for the specified string. -// -// For example, you can add a ByteMatchSetUpdate object that matches web requests -// in which User-Agent headers contain the string BadBot. You can then configure -// AWS WAF to block those requests. -// -// To create and configure a ByteMatchSet, perform the following steps: -// -// Create a ByteMatchSet. For more information, see CreateByteMatchSet. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateByteMatchSet request. -// -// Submit an UpdateByteMatchSet request to specify the part of the request that -// you want AWS WAF to inspect (for example, the header or the URI) and the -// value that you want AWS WAF to watch for. +// ListByteMatchSets API operation for AWS WAF. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of ByteMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateByteMatchSet for usage and error information. +// API operation ListByteMatchSets for usage and error information. 
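// A sketch of the one-shot convenience wrapper documented above: a single
// ListByteMatchSets call that prints the returned ByteMatchSetSummary values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.ListByteMatchSets(&waf.ListByteMatchSetsInput{
		Limit: aws.Int64(100), // WAF list calls return at most 100 items per response
	})
	if err != nil {
		fmt.Println("ListByteMatchSets failed:", err)
		return
	}
	for _, s := range out.ByteMatchSets {
		fmt.Println(aws.StringValue(s.ByteMatchSetId), aws.StringValue(s.Name))
	}
}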
// // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -4116,213 +4409,250 @@ func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *re // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeInvalidOperationException "InvalidOperationException" -// The operation failed because there was nothing to do. For example: +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSets +func (c *WAF) ListByteMatchSets(input *ListByteMatchSetsInput) (*ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) + return out, req.Send() +} + +// ListByteMatchSetsWithContext is the same as ListByteMatchSets with the addition of +// the ability to pass a context and additional request options. // -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. +// See ListByteMatchSets for details on how to use this API operation. // -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) ListByteMatchSetsWithContext(ctx aws.Context, input *ListByteMatchSetsInput, opts ...request.Option) (*ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListGeoMatchSets = "ListGeoMatchSets" + +// ListGeoMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListGeoMatchSets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. +// See ListGeoMatchSets for more information on using the ListGeoMatchSets +// API call, and error handling. // -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. 
-// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// // Example sending a request using the ListGeoMatchSetsRequest method. +// req, resp := client.ListGeoMatchSetsRequest(params) // -// * ErrCodeNonexistentContainerException "NonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListGeoMatchSets +func (c *WAF) ListGeoMatchSetsRequest(input *ListGeoMatchSetsInput) (req *request.Request, output *ListGeoMatchSetsOutput) { + op := &request.Operation{ + Name: opListGeoMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListGeoMatchSetsInput{} + } + + output = &ListGeoMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListGeoMatchSets API operation for AWS WAF. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// Returns an array of GeoMatchSetSummary objects in the response. // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. +// See the AWS API reference guide for AWS WAF's +// API operation ListGeoMatchSets for usage and error information. // -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * ErrCodeStaleDataException "StaleDataException" +// * ErrCodeInvalidAccountException "InvalidAccountException" // The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// -// * ErrCodeLimitsExceededException "LimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. +// by using an invalid account identifier. 
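// A sketch of listing the GeoMatchSetSummary objects described above; it
// assumes the GeoMatchSetId and Name fields on the summary shape, and checks
// NextMarker to see whether more sets remain.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.ListGeoMatchSets(&waf.ListGeoMatchSetsInput{Limit: aws.Int64(10)})
	if err != nil {
		fmt.Println("ListGeoMatchSets failed:", err)
		return
	}
	for _, s := range out.GeoMatchSets {
		fmt.Println(aws.StringValue(s.GeoMatchSetId), aws.StringValue(s.Name))
	}
	if out.NextMarker != nil {
		fmt.Println("more results available; pass NextMarker to the next call")
	}
}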
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSet -func (c *WAF) UpdateByteMatchSet(input *UpdateByteMatchSetInput) (*UpdateByteMatchSetOutput, error) { - req, out := c.UpdateByteMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListGeoMatchSets +func (c *WAF) ListGeoMatchSets(input *ListGeoMatchSetsInput) (*ListGeoMatchSetsOutput, error) { + req, out := c.ListGeoMatchSetsRequest(input) return out, req.Send() } -// UpdateByteMatchSetWithContext is the same as UpdateByteMatchSet with the addition of +// ListGeoMatchSetsWithContext is the same as ListGeoMatchSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateByteMatchSet for details on how to use this API operation. +// See ListGeoMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateByteMatchSetWithContext(ctx aws.Context, input *UpdateByteMatchSetInput, opts ...request.Option) (*UpdateByteMatchSetOutput, error) { - req, out := c.UpdateByteMatchSetRequest(input) +func (c *WAF) ListGeoMatchSetsWithContext(ctx aws.Context, input *ListGeoMatchSetsInput, opts ...request.Option) (*ListGeoMatchSetsOutput, error) { + req, out := c.ListGeoMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateIPSet = "UpdateIPSet" +const opListIPSets = "ListIPSets" -// UpdateIPSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateIPSet operation. The "output" return +// ListIPSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListIPSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateIPSet for more information on using the UpdateIPSet +// See ListIPSets for more information on using the ListIPSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateIPSetRequest method. -// req, resp := client.UpdateIPSetRequest(params) +// // Example sending a request using the ListIPSetsRequest method. 
+// req, resp := client.ListIPSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSet -func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, output *UpdateIPSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSets +func (c *WAF) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, output *ListIPSetsOutput) { op := &request.Operation{ - Name: opUpdateIPSet, + Name: opListIPSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateIPSetInput{} + input = &ListIPSetsInput{} } - output = &UpdateIPSetOutput{} + output = &ListIPSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateIPSet API operation for AWS WAF. -// -// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor -// object, you specify the following values: +// ListIPSets API operation for AWS WAF. // -// * Whether to insert or delete the object from the array. If you want to -// change an IPSetDescriptor object, you delete the existing object and add -// a new one. +// Returns an array of IPSetSummary objects in the response. // -// * The IP address version, IPv4 or IPv6. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * The IP address in CIDR notation, for example, 192.0.2.0/24 (for the -// range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 -// (for the individual IP address 192.0.2.44). +// See the AWS API reference guide for AWS WAF's +// API operation ListIPSets for usage and error information. // -// AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, -// /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, -// see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// IPv6 addresses can be represented using any of the following formats: +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// * 1111:0000:0000:0000:0000:0000:0000:0111/128 +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSets +func (c *WAF) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + return out, req.Send() +} + +// ListIPSetsWithContext is the same as ListIPSets with the addition of +// the ability to pass a context and additional request options. // -// * 1111:0:0:0:0:0:0:0111/128 +// See ListIPSets for details on how to use this API operation. // -// * 1111::0111/128 +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
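// A pagination sketch for ListIPSets: these WAF List operations appear to have
// no paginator helper in this SDK version, so the NextMarker from each
// response is fed into the next request until it comes back empty.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	input := &waf.ListIPSetsInput{Limit: aws.Int64(50)}
	for {
		out, err := client.ListIPSets(input)
		if err != nil {
			fmt.Println("ListIPSets failed:", err)
			return
		}
		for _, s := range out.IPSets {
			fmt.Println(aws.StringValue(s.IPSetId), aws.StringValue(s.Name))
		}
		// An absent or empty NextMarker means the last page was reached.
		if out.NextMarker == nil || aws.StringValue(out.NextMarker) == "" {
			break
		}
		input.NextMarker = out.NextMarker
	}
}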
+func (c *WAF) ListIPSetsWithContext(ctx aws.Context, input *ListIPSetsInput, opts ...request.Option) (*ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRateBasedRules = "ListRateBasedRules" + +// ListRateBasedRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRateBasedRules operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * 1111::111/128 +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// You use an IPSet to specify which web requests you want to allow or block -// based on the IP addresses that the requests originated from. For example, -// if you're receiving a lot of requests from one or a small number of IP addresses -// and you want to block the requests, you can create an IPSet that specifies -// those IP addresses, and then configure AWS WAF to block the requests. +// See ListRateBasedRules for more information on using the ListRateBasedRules +// API call, and error handling. // -// To create and configure an IPSet, perform the following steps: +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Submit a CreateIPSet request. // -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateIPSet request. +// // Example sending a request using the ListRateBasedRulesRequest method. +// req, resp := client.ListRateBasedRulesRequest(params) // -// Submit an UpdateIPSet request to specify the IP addresses that you want AWS -// WAF to watch for. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// When you update an IPSet, you specify the IP addresses that you want to add -// and/or the IP addresses that you want to delete. If you want to change an -// IP address, you delete the existing IP address and add the new one. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRules +func (c *WAF) ListRateBasedRulesRequest(input *ListRateBasedRulesInput) (req *request.Request, output *ListRateBasedRulesOutput) { + op := &request.Operation{ + Name: opListRateBasedRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRateBasedRulesInput{} + } + + output = &ListRateBasedRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRateBasedRules API operation for AWS WAF. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateIPSet for usage and error information. +// API operation ListRateBasedRules for usage and error information. 
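// A sketch that resolves each RuleSummary from ListRateBasedRules into the
// full rule via GetRateBasedRule; the GetRateBasedRule call and the RateLimit
// field are assumed to be present in this vendored SDK version.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.ListRateBasedRules(&waf.ListRateBasedRulesInput{Limit: aws.Int64(10)})
	if err != nil {
		fmt.Println("ListRateBasedRules failed:", err)
		return
	}
	for _, s := range out.Rules {
		// Fetch the full rule to read its five-minute request rate limit.
		rule, err := client.GetRateBasedRule(&waf.GetRateBasedRuleInput{RuleId: s.RuleId})
		if err != nil {
			fmt.Println("GetRateBasedRule failed:", err)
			continue
		}
		fmt.Println(aws.StringValue(s.Name), "rate limit:", aws.Int64Value(rule.Rule.RateLimit))
	}
}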
// // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -4331,206 +4661,166 @@ func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeInvalidOperationException "InvalidOperationException" -// The operation failed because there was nothing to do. For example: +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRules +func (c *WAF) ListRateBasedRules(input *ListRateBasedRulesInput) (*ListRateBasedRulesOutput, error) { + req, out := c.ListRateBasedRulesRequest(input) + return out, req.Send() +} + +// ListRateBasedRulesWithContext is the same as ListRateBasedRules with the addition of +// the ability to pass a context and additional request options. // -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. +// See ListRateBasedRules for details on how to use this API operation. // -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) ListRateBasedRulesWithContext(ctx aws.Context, input *ListRateBasedRulesInput, opts ...request.Option) (*ListRateBasedRulesOutput, error) { + req, out := c.ListRateBasedRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRegexMatchSets = "ListRegexMatchSets" + +// ListRegexMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListRegexMatchSets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. 
-// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// See ListRegexMatchSets for more information on using the ListRegexMatchSets +// API call, and error handling. // -// * ErrCodeNonexistentContainerException "NonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// // Example sending a request using the ListRegexMatchSetsRequest method. +// req, resp := client.ListRegexMatchSetsRequest(params) // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexMatchSets +func (c *WAF) ListRegexMatchSetsRequest(input *ListRegexMatchSetsInput) (req *request.Request, output *ListRegexMatchSetsOutput) { + op := &request.Operation{ + Name: opListRegexMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRegexMatchSetsInput{} + } + + output = &ListRegexMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRegexMatchSets API operation for AWS WAF. // -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// Returns an array of RegexMatchSetSummary objects. // -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// See the AWS API reference guide for AWS WAF's +// API operation ListRegexMatchSets for usage and error information. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * ErrCodeLimitsExceededException "LimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. 
For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSet -func (c *WAF) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { - req, out := c.UpdateIPSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexMatchSets +func (c *WAF) ListRegexMatchSets(input *ListRegexMatchSetsInput) (*ListRegexMatchSetsOutput, error) { + req, out := c.ListRegexMatchSetsRequest(input) return out, req.Send() } -// UpdateIPSetWithContext is the same as UpdateIPSet with the addition of +// ListRegexMatchSetsWithContext is the same as ListRegexMatchSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateIPSet for details on how to use this API operation. +// See ListRegexMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateIPSetWithContext(ctx aws.Context, input *UpdateIPSetInput, opts ...request.Option) (*UpdateIPSetOutput, error) { - req, out := c.UpdateIPSetRequest(input) +func (c *WAF) ListRegexMatchSetsWithContext(ctx aws.Context, input *ListRegexMatchSetsInput, opts ...request.Option) (*ListRegexMatchSetsOutput, error) { + req, out := c.ListRegexMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateRateBasedRule = "UpdateRateBasedRule" +const opListRegexPatternSets = "ListRegexPatternSets" -// UpdateRateBasedRuleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRateBasedRule operation. The "output" return +// ListRegexPatternSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListRegexPatternSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateRateBasedRule for more information on using the UpdateRateBasedRule +// See ListRegexPatternSets for more information on using the ListRegexPatternSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateRateBasedRuleRequest method. -// req, resp := client.UpdateRateBasedRuleRequest(params) +// // Example sending a request using the ListRegexPatternSetsRequest method. 
+// req, resp := client.ListRegexPatternSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRule -func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req *request.Request, output *UpdateRateBasedRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexPatternSets +func (c *WAF) ListRegexPatternSetsRequest(input *ListRegexPatternSetsInput) (req *request.Request, output *ListRegexPatternSetsOutput) { op := &request.Operation{ - Name: opUpdateRateBasedRule, + Name: opListRegexPatternSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateRateBasedRuleInput{} + input = &ListRegexPatternSetsInput{} } - output = &UpdateRateBasedRuleOutput{} + output = &ListRegexPatternSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateRateBasedRule API operation for AWS WAF. -// -// Inserts or deletes Predicate objects in a rule and updates the RateLimit -// in the rule. -// -// Each Predicate object identifies a predicate, such as a ByteMatchSet or an -// IPSet, that specifies the web requests that you want to block or count. The -// RateLimit specifies the number of requests every five minutes that triggers -// the rule. -// -// If you add more than one predicate to a RateBasedRule, a request must match -// all the predicates and exceed the RateLimit to be counted or blocked. For -// example, suppose you add the following to a RateBasedRule: -// -// * An IPSet that matches the IP address 192.0.2.44/32 -// -// * A ByteMatchSet that matches BadBot in the User-Agent header -// -// Further, you specify a RateLimit of 15,000. -// -// You then add the RateBasedRule to a WebACL and specify that you want to block -// requests that satisfy the rule. For a request to be blocked, it must come -// from the IP address 192.0.2.44 and the User-Agent header in the request must -// contain the value BadBot. Further, requests that match these two conditions -// much be received at a rate of more than 15,000 every five minutes. If the -// rate drops below this limit, AWS WAF no longer blocks the requests. -// -// As a second example, suppose you want to limit requests to a particular page -// on your site. To do this, you could add the following to a RateBasedRule: -// -// * A ByteMatchSet with FieldToMatch of URI +// ListRegexPatternSets API operation for AWS WAF. // -// * A PositionalConstraint of STARTS_WITH -// -// * A TargetString of login -// -// Further, you specify a RateLimit of 15,000. -// -// By adding this RateBasedRule to a WebACL, you could limit requests to your -// login page without affecting the rest of your site. +// Returns an array of RegexPatternSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateRateBasedRule for usage and error information. +// API operation ListRegexPatternSets for usage and error information. // // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. 
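// A sketch of listing the RegexPatternSetSummary objects described above;
// the RegexPatternSetId and Name fields on the summary shape are assumptions
// based on the naming convention of the other summary types.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.ListRegexPatternSets(&waf.ListRegexPatternSetsInput{Limit: aws.Int64(10)})
	if err != nil {
		fmt.Println("ListRegexPatternSets failed:", err)
		return
	}
	for _, s := range out.RegexPatternSets {
		fmt.Println(aws.StringValue(s.RegexPatternSetId), aws.StringValue(s.Name))
	}
}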
-// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -4539,201 +4829,82 @@ func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req * // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeInvalidOperationException "InvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeNonexistentContainerException "NonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: -// -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. -// -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. -// -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. -// -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: -// -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. -// -// * You tried to delete a Rule that is still referenced by a WebACL. 
-// -// * ErrCodeLimitsExceededException "LimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRule -func (c *WAF) UpdateRateBasedRule(input *UpdateRateBasedRuleInput) (*UpdateRateBasedRuleOutput, error) { - req, out := c.UpdateRateBasedRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexPatternSets +func (c *WAF) ListRegexPatternSets(input *ListRegexPatternSetsInput) (*ListRegexPatternSetsOutput, error) { + req, out := c.ListRegexPatternSetsRequest(input) return out, req.Send() } -// UpdateRateBasedRuleWithContext is the same as UpdateRateBasedRule with the addition of +// ListRegexPatternSetsWithContext is the same as ListRegexPatternSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateRateBasedRule for details on how to use this API operation. +// See ListRegexPatternSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateRateBasedRuleWithContext(ctx aws.Context, input *UpdateRateBasedRuleInput, opts ...request.Option) (*UpdateRateBasedRuleOutput, error) { - req, out := c.UpdateRateBasedRuleRequest(input) +func (c *WAF) ListRegexPatternSetsWithContext(ctx aws.Context, input *ListRegexPatternSetsInput, opts ...request.Option) (*ListRegexPatternSetsOutput, error) { + req, out := c.ListRegexPatternSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateRule = "UpdateRule" +const opListRules = "ListRules" -// UpdateRuleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRule operation. The "output" return +// ListRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRules operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateRule for more information on using the UpdateRule +// See ListRules for more information on using the ListRules // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateRuleRequest method. -// req, resp := client.UpdateRuleRequest(params) +// // Example sending a request using the ListRulesRequest method. 
+// req, resp := client.ListRulesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRule -func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, output *UpdateRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRules +func (c *WAF) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { op := &request.Operation{ - Name: opUpdateRule, + Name: opListRules, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateRuleInput{} + input = &ListRulesInput{} } - output = &UpdateRuleOutput{} + output = &ListRulesOutput{} req = c.newRequest(op, input, output) return } -// UpdateRule API operation for AWS WAF. -// -// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies -// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests -// that you want to allow, block, or count. If you add more than one predicate -// to a Rule, a request must match all of the specifications to be allowed, -// blocked, or counted. For example, suppose you add the following to a Rule: -// -// * A ByteMatchSet that matches the value BadBot in the User-Agent header -// -// * An IPSet that matches the IP address 192.0.2.44 -// -// You then add the Rule to a WebACL and specify that you want to block requests -// that satisfy the Rule. For a request to be blocked, the User-Agent header -// in the request must contain the value BadBotand the request must originate -// from the IP address 192.0.2.44. -// -// To create and configure a Rule, perform the following steps: -// -// Create and update the predicates that you want to include in the Rule. -// -// Create the Rule. See CreateRule. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateRule request. -// -// Submit an UpdateRule request to add predicates to the Rule. -// -// Create and update a WebACL that contains the Rule. See CreateWebACL. -// -// If you want to replace one ByteMatchSet or IPSet with another, you delete -// the existing one and add the new one. +// ListRules API operation for AWS WAF. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateRule for usage and error information. +// API operation ListRules for usage and error information. // // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -4742,207 +4913,82 @@ func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, o // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. 
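// A sketch of ListRulesWithContext with a per-request option: the variadic
// request.Option arguments in the signature above can, for example, raise the
// log level for this one call; request.WithLogLevel is assumed available in
// this SDK version.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.ListRulesWithContext(context.Background(),
		&waf.ListRulesInput{Limit: aws.Int64(20)},
		request.WithLogLevel(aws.LogDebugWithHTTPBody), // debug-log this request and response only
	)
	if err != nil {
		fmt.Println("ListRules failed:", err)
		return
	}
	for _, r := range out.Rules {
		fmt.Println(aws.StringValue(r.RuleId), aws.StringValue(r.Name))
	}
}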
// -// * ErrCodeInvalidOperationException "InvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeNonexistentContainerException "NonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: -// -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. -// -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. -// -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. -// -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: -// -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. -// -// * You tried to delete a Rule that is still referenced by a WebACL. -// -// * ErrCodeLimitsExceededException "LimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. 
-// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRule -func (c *WAF) UpdateRule(input *UpdateRuleInput) (*UpdateRuleOutput, error) { - req, out := c.UpdateRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRules +func (c *WAF) ListRules(input *ListRulesInput) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) return out, req.Send() } -// UpdateRuleWithContext is the same as UpdateRule with the addition of +// ListRulesWithContext is the same as ListRules with the addition of // the ability to pass a context and additional request options. // -// See UpdateRule for details on how to use this API operation. +// See ListRules for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateRuleWithContext(ctx aws.Context, input *UpdateRuleInput, opts ...request.Option) (*UpdateRuleOutput, error) { - req, out := c.UpdateRuleRequest(input) +func (c *WAF) ListRulesWithContext(ctx aws.Context, input *ListRulesInput, opts ...request.Option) (*ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet" +const opListSizeConstraintSets = "ListSizeConstraintSets" -// UpdateSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSizeConstraintSet operation. The "output" return +// ListSizeConstraintSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSizeConstraintSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateSizeConstraintSet for more information on using the UpdateSizeConstraintSet +// See ListSizeConstraintSets for more information on using the ListSizeConstraintSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSizeConstraintSetRequest method. -// req, resp := client.UpdateSizeConstraintSetRequest(params) +// // Example sending a request using the ListSizeConstraintSetsRequest method. 
+// req, resp := client.ListSizeConstraintSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSet -func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput) (req *request.Request, output *UpdateSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSets +func (c *WAF) ListSizeConstraintSetsRequest(input *ListSizeConstraintSetsInput) (req *request.Request, output *ListSizeConstraintSetsOutput) { op := &request.Operation{ - Name: opUpdateSizeConstraintSet, + Name: opListSizeConstraintSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateSizeConstraintSetInput{} + input = &ListSizeConstraintSetsInput{} } - output = &UpdateSizeConstraintSetOutput{} + output = &ListSizeConstraintSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateSizeConstraintSet API operation for AWS WAF. -// -// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. -// For each SizeConstraint object, you specify the following values: -// -// * Whether to insert or delete the object from the array. If you want to -// change a SizeConstraintSetUpdate object, you delete the existing object -// and add a new one. -// -// * The part of a web request that you want AWS WAF to evaluate, such as -// the length of a query string or the length of the User-Agent header. -// -// * Whether to perform any transformations on the request, such as converting -// it to lowercase, before checking its length. Note that transformations -// of the request body are not supported because the AWS resource forwards -// only the first 8192 bytes of your request to AWS WAF. -// -// * A ComparisonOperator used for evaluating the selected part of the request -// against the specified Size, such as equals, greater than, less than, and -// so on. -// -// * The length, in bytes, that you want AWS WAF to watch for in selected -// part of the request. The length is computed after applying the transformation. -// -// For example, you can add a SizeConstraintSetUpdate object that matches web -// requests in which the length of the User-Agent header is greater than 100 -// bytes. You can then configure AWS WAF to block those requests. -// -// To create and configure a SizeConstraintSet, perform the following steps: -// -// Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSizeConstraintSet request. -// -// Submit an UpdateSizeConstraintSet request to specify the part of the request -// that you want AWS WAF to inspect (for example, the header or the URI) and -// the value that you want AWS WAF to watch for. +// ListSizeConstraintSets API operation for AWS WAF. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of SizeConstraintSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateSizeConstraintSet for usage and error information. 
+// API operation ListSizeConstraintSets for usage and error information. // // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -4951,194 +4997,248 @@ func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeInvalidOperationException "InvalidOperationException" -// The operation failed because there was nothing to do. For example: +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSets +func (c *WAF) ListSizeConstraintSets(input *ListSizeConstraintSetsInput) (*ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) + return out, req.Send() +} + +// ListSizeConstraintSetsWithContext is the same as ListSizeConstraintSets with the addition of +// the ability to pass a context and additional request options. // -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. +// See ListSizeConstraintSets for details on how to use this API operation. // -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) ListSizeConstraintSetsWithContext(ctx aws.Context, input *ListSizeConstraintSetsInput, opts ...request.Option) (*ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" + +// ListSqlInjectionMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSqlInjectionMatchSets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. +// See ListSqlInjectionMatchSets for more information on using the ListSqlInjectionMatchSets +// API call, and error handling. // -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. 
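// The List operations in this file page by Limit/NextMarker, so callers loop
// until NextMarker comes back empty. A sketch using ListSizeConstraintSets,
// assuming svc is a *waf.WAF client built as in the earlier sketch:
//
//	input := &waf.ListSizeConstraintSetsInput{Limit: aws.Int64(100)}
//	for {
//		out, err := svc.ListSizeConstraintSets(input)
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, s := range out.SizeConstraintSets {
//			fmt.Println(aws.StringValue(s.SizeConstraintSetId), aws.StringValue(s.Name))
//		}
//		if aws.StringValue(out.NextMarker) == "" {
//			break // no more pages
//		}
//		input.NextMarker = out.NextMarker
//	}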
// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: +// // Example sending a request using the ListSqlInjectionMatchSetsRequest method. +// req, resp := client.ListSqlInjectionMatchSetsRequest(params) // -// * You specified an invalid parameter name. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You specified an invalid value. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSets +func (c *WAF) ListSqlInjectionMatchSetsRequest(input *ListSqlInjectionMatchSetsInput) (req *request.Request, output *ListSqlInjectionMatchSetsOutput) { + op := &request.Operation{ + Name: opListSqlInjectionMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSqlInjectionMatchSetsInput{} + } + + output = &ListSqlInjectionMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSqlInjectionMatchSets API operation for AWS WAF. // -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. +// Returns an array of SqlInjectionMatchSet objects. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. +// See the AWS API reference guide for AWS WAF's +// API operation ListSqlInjectionMatchSets for usage and error information. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSets +func (c *WAF) ListSqlInjectionMatchSets(input *ListSqlInjectionMatchSetsInput) (*ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + return out, req.Send() +} + +// ListSqlInjectionMatchSetsWithContext is the same as ListSqlInjectionMatchSets with the addition of +// the ability to pass a context and additional request options. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// See ListSqlInjectionMatchSets for details on how to use this API operation. // -// * ErrCodeNonexistentContainerException "NonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) ListSqlInjectionMatchSetsWithContext(ctx aws.Context, input *ListSqlInjectionMatchSetsInput, opts ...request.Option) (*ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListWebACLs = "ListWebACLs" + +// ListWebACLsRequest generates a "aws/request.Request" representing the +// client's request for the ListWebACLs operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// See ListWebACLs for more information on using the ListWebACLs +// API call, and error handling. // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. // -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// // Example sending a request using the ListWebACLsRequest method. +// req, resp := client.ListWebACLsRequest(params) // -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLs +func (c *WAF) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Request, output *ListWebACLsOutput) { + op := &request.Operation{ + Name: opListWebACLs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListWebACLsInput{} + } + + output = &ListWebACLsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWebACLs API operation for AWS WAF. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// Returns an array of WebACLSummary objects in the response. // -// * ErrCodeLimitsExceededException "LimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
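// Since a nil context panics, the usual pattern pairs these WithContext
// variants with context.WithTimeout; the standard context.Context satisfies
// aws.Context. A sketch, assuming svc as in the earlier sketches (adds the
// "context" and "time" imports):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//
//	out, err := svc.ListSqlInjectionMatchSetsWithContext(ctx, &waf.ListSqlInjectionMatchSetsInput{
//		Limit: aws.Int64(50),
//	})
//	if err != nil {
//		log.Fatal(err) // surfaces deadline/cancellation as well as API errors
//	}
//	fmt.Printf("%d SQL injection match sets\n", len(out.SqlInjectionMatchSets))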
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSet -func (c *WAF) UpdateSizeConstraintSet(input *UpdateSizeConstraintSetInput) (*UpdateSizeConstraintSetOutput, error) { - req, out := c.UpdateSizeConstraintSetRequest(input) +// See the AWS API reference guide for AWS WAF's +// API operation ListWebACLs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLs +func (c *WAF) ListWebACLs(input *ListWebACLsInput) (*ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) return out, req.Send() } -// UpdateSizeConstraintSetWithContext is the same as UpdateSizeConstraintSet with the addition of +// ListWebACLsWithContext is the same as ListWebACLs with the addition of // the ability to pass a context and additional request options. // -// See UpdateSizeConstraintSet for details on how to use this API operation. +// See ListWebACLs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateSizeConstraintSetWithContext(ctx aws.Context, input *UpdateSizeConstraintSetInput, opts ...request.Option) (*UpdateSizeConstraintSetOutput, error) { - req, out := c.UpdateSizeConstraintSetRequest(input) +func (c *WAF) ListWebACLsWithContext(ctx aws.Context, input *ListWebACLsInput, opts ...request.Option) (*ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet" +const opListXssMatchSets = "ListXssMatchSets" -// UpdateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSqlInjectionMatchSet operation. The "output" return +// ListXssMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListXssMatchSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateSqlInjectionMatchSet for more information on using the UpdateSqlInjectionMatchSet +// See ListXssMatchSets for more information on using the ListXssMatchSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSqlInjectionMatchSetRequest method. -// req, resp := client.UpdateSqlInjectionMatchSetRequest(params) +// // Example sending a request using the ListXssMatchSetsRequest method. 
+// req, resp := client.ListXssMatchSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSet -func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSetInput) (req *request.Request, output *UpdateSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSets +func (c *WAF) ListXssMatchSetsRequest(input *ListXssMatchSetsInput) (req *request.Request, output *ListXssMatchSetsOutput) { op := &request.Operation{ - Name: opUpdateSqlInjectionMatchSet, + Name: opListXssMatchSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateSqlInjectionMatchSetInput{} + input = &ListXssMatchSetsInput{} } - output = &UpdateSqlInjectionMatchSetOutput{} + output = &ListXssMatchSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateSqlInjectionMatchSet API operation for AWS WAF. -// -// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. -// For each SqlInjectionMatchTuple object, you specify the following values: -// -// * Action: Whether to insert the object into or delete the object from -// the array. To change a SqlInjectionMatchTuple, you delete the existing -// object and add a new one. -// -// * FieldToMatch: The part of web requests that you want AWS WAF to inspect -// and, if you want AWS WAF to inspect a header, the name of the header. -// -// * TextTransformation: Which text transformation, if any, to perform on -// the web request before inspecting the request for snippets of malicious -// SQL code. -// -// You use SqlInjectionMatchSet objects to specify which CloudFront requests -// you want to allow, block, or count. For example, if you're receiving requests -// that contain snippets of SQL code in the query string and you want to block -// the requests, you can create a SqlInjectionMatchSet with the applicable settings, -// and then configure AWS WAF to block the requests. -// -// To create and configure a SqlInjectionMatchSet, perform the following steps: -// -// Submit a CreateSqlInjectionMatchSet request. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateIPSet request. -// -// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web -// requests that you want AWS WAF to inspect for snippets of SQL code. +// ListXssMatchSets API operation for AWS WAF. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of XssMatchSet objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateSqlInjectionMatchSet for usage and error information. +// API operation ListXssMatchSets for usage and error information. // // Returned Error Codes: // * ErrCodeInternalErrorException "InternalErrorException" @@ -5149,196 +5249,106 @@ func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSe // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. 
// -// * ErrCodeInvalidOperationException "InvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSets +func (c *WAF) ListXssMatchSets(input *ListXssMatchSetsInput) (*ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) + return out, req.Send() +} + +// ListXssMatchSetsWithContext is the same as ListXssMatchSets with the addition of +// the ability to pass a context and additional request options. // -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeInvalidParameterException "InvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeNonexistentContainerException "NonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: -// -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. -// -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. -// -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. -// -// * ErrCodeNonexistentItemException "NonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// -// * ErrCodeLimitsExceededException "LimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. 
For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSet -func (c *WAF) UpdateSqlInjectionMatchSet(input *UpdateSqlInjectionMatchSetInput) (*UpdateSqlInjectionMatchSetOutput, error) { - req, out := c.UpdateSqlInjectionMatchSetRequest(input) - return out, req.Send() -} - -// UpdateSqlInjectionMatchSetWithContext is the same as UpdateSqlInjectionMatchSet with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateSqlInjectionMatchSet for details on how to use this API operation. +// See ListXssMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateSqlInjectionMatchSetWithContext(ctx aws.Context, input *UpdateSqlInjectionMatchSetInput, opts ...request.Option) (*UpdateSqlInjectionMatchSetOutput, error) { - req, out := c.UpdateSqlInjectionMatchSetRequest(input) +func (c *WAF) ListXssMatchSetsWithContext(ctx aws.Context, input *ListXssMatchSetsInput, opts ...request.Option) (*ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateWebACL = "UpdateWebACL" +const opUpdateByteMatchSet = "UpdateByteMatchSet" -// UpdateWebACLRequest generates a "aws/request.Request" representing the -// client's request for the UpdateWebACL operation. The "output" return +// UpdateByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateByteMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateWebACL for more information on using the UpdateWebACL +// See UpdateByteMatchSet for more information on using the UpdateByteMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateWebACLRequest method. -// req, resp := client.UpdateWebACLRequest(params) +// // Example sending a request using the UpdateByteMatchSetRequest method. 
+// req, resp := client.UpdateByteMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACL -func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Request, output *UpdateWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSet +func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *request.Request, output *UpdateByteMatchSetOutput) { op := &request.Operation{ - Name: opUpdateWebACL, + Name: opUpdateByteMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateWebACLInput{} + input = &UpdateByteMatchSetInput{} } - output = &UpdateWebACLOutput{} + output = &UpdateByteMatchSetOutput{} req = c.newRequest(op, input, output) return } -// UpdateWebACL API operation for AWS WAF. +// UpdateByteMatchSet API operation for AWS WAF. // -// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies -// web requests that you want to allow, block, or count. When you update a WebACL, -// you specify the following values: +// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For +// each ByteMatchTuple object, you specify the following values: // -// * A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs -// the default action if a request doesn't match the criteria in any of the -// Rules in a WebACL. +// * Whether to insert or delete the object from the array. If you want to +// change a ByteMatchSetUpdate object, you delete the existing object and +// add a new one. // -// * The Rules that you want to add and/or delete. If you want to replace -// one Rule with another, you delete the existing Rule and add the new one. +// * The part of a web request that you want AWS WAF to inspect, such as +// a query string or the value of the User-Agent header. // -// * For each Rule, whether you want AWS WAF to allow requests, block requests, -// or count requests that match the conditions in the Rule. +// * The bytes (typically a string that corresponds with ASCII characters) +// that you want AWS WAF to look for. For more information, including how +// you specify the values for the AWS WAF API and the AWS CLI or SDKs, see +// TargetString in the ByteMatchTuple data type. // -// * The order in which you want AWS WAF to evaluate the Rules in a WebACL. -// If you add more than one Rule to a WebACL, AWS WAF evaluates each request -// against the Rules in order based on the value of Priority. (The Rule that -// has the lowest value for Priority is evaluated first.) When a web request -// matches all of the predicates (such as ByteMatchSets and IPSets) in a -// Rule, AWS WAF immediately takes the corresponding action, allow or block, -// and doesn't evaluate the request against the remaining Rules in the WebACL, -// if any. +// * Where to look, such as at the beginning or the end of a query string. // -// To create and configure a WebACL, perform the following steps: +// * Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. // -// Create and update the predicates that you want to include in Rules. For more -// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, -// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. 
+// For example, you can add a ByteMatchSetUpdate object that matches web requests +// in which User-Agent headers contain the string BadBot. You can then configure +// AWS WAF to block those requests. // -// Create and update the Rules that you want to include in the WebACL. For more -// information, see CreateRule and UpdateRule. +// To create and configure a ByteMatchSet, perform the following steps: // -// Create a WebACL. See CreateWebACL. +// Create a ByteMatchSet. For more information, see CreateByteMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateWebACL request. -// -// Submit an UpdateWebACL request to specify the Rules that you want to include -// in the WebACL, to specify the default action, and to associate the WebACL -// with a CloudFront distribution. +// parameter of an UpdateByteMatchSet request. // -// Be aware that if you try to add a RATE_BASED rule to a web ACL without setting -// the rule type when first creating the rule, the UpdateWebACL request will -// fail because the request tries to add a REGULAR rule (the default rule type) -// with the specified ID, which does not exist. +// Submit an UpdateByteMatchSet request to specify the part of the request that +// you want AWS WAF to inspect (for example, the header or the URI) and the +// value that you want AWS WAF to watch for. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -5348,13 +5358,9 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateWebACL for usage and error information. +// API operation UpdateByteMatchSet for usage and error information. // // Returned Error Codes: -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -5405,7 +5411,7 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -5432,13 +5438,9 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// * ErrCodeReferencedItemException "ReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: -// -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. -// -// * You tried to delete a Rule that is still referenced by a WebACL. +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. 
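// The change-token workflow above, made concrete for UpdateByteMatchSet. The
// ByteMatchSetId is a placeholder, and a real caller would fetch a fresh token
// and resubmit on StaleDataException rather than give up. A sketch, assuming
// svc as in the earlier sketches:
//
//	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	_, err = svc.UpdateByteMatchSet(&waf.UpdateByteMatchSetInput{
//		ByteMatchSetId: aws.String("example-byte-match-set-id"), // placeholder
//		ChangeToken:    token.ChangeToken,
//		Updates: []*waf.ByteMatchSetUpdate{{
//			Action: aws.String(waf.ChangeActionInsert),
//			ByteMatchTuple: &waf.ByteMatchTuple{
//				FieldToMatch: &waf.FieldToMatch{
//					Type: aws.String(waf.MatchFieldTypeHeader),
//					Data: aws.String("User-Agent"),
//				},
//				TargetString:         []byte("BadBot"),
//				PositionalConstraint: aws.String(waf.PositionalConstraintContains),
//				TextTransformation:   aws.String(waf.TextTransformationNone),
//			},
//		}},
//	})
//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == waf.ErrCodeStaleDataException {
//		// the token was already used: get a new one and retry the update
//	}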
// // * ErrCodeLimitsExceededException "LimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of @@ -5446,101 +5448,97 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACL -func (c *WAF) UpdateWebACL(input *UpdateWebACLInput) (*UpdateWebACLOutput, error) { - req, out := c.UpdateWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSet +func (c *WAF) UpdateByteMatchSet(input *UpdateByteMatchSetInput) (*UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) return out, req.Send() } -// UpdateWebACLWithContext is the same as UpdateWebACL with the addition of +// UpdateByteMatchSetWithContext is the same as UpdateByteMatchSet with the addition of // the ability to pass a context and additional request options. // -// See UpdateWebACL for details on how to use this API operation. +// See UpdateByteMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAF) UpdateWebACLWithContext(ctx aws.Context, input *UpdateWebACLInput, opts ...request.Option) (*UpdateWebACLOutput, error) { - req, out := c.UpdateWebACLRequest(input) +func (c *WAF) UpdateByteMatchSetWithContext(ctx aws.Context, input *UpdateByteMatchSetInput, opts ...request.Option) (*UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateXssMatchSet = "UpdateXssMatchSet" +const opUpdateGeoMatchSet = "UpdateGeoMatchSet" -// UpdateXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateXssMatchSet operation. The "output" return +// UpdateGeoMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGeoMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateXssMatchSet for more information on using the UpdateXssMatchSet +// See UpdateGeoMatchSet for more information on using the UpdateGeoMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateXssMatchSetRequest method. -// req, resp := client.UpdateXssMatchSetRequest(params) +// // Example sending a request using the UpdateGeoMatchSetRequest method. 
+// req, resp := client.UpdateGeoMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSet -func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *request.Request, output *UpdateXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateGeoMatchSet +func (c *WAF) UpdateGeoMatchSetRequest(input *UpdateGeoMatchSetInput) (req *request.Request, output *UpdateGeoMatchSetOutput) { op := &request.Operation{ - Name: opUpdateXssMatchSet, + Name: opUpdateGeoMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateXssMatchSetInput{} + input = &UpdateGeoMatchSetInput{} } - output = &UpdateXssMatchSetOutput{} + output = &UpdateGeoMatchSetOutput{} req = c.newRequest(op, input, output) return } -// UpdateXssMatchSet API operation for AWS WAF. +// UpdateGeoMatchSet API operation for AWS WAF. // -// Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For -// each XssMatchTuple object, you specify the following values: +// Inserts or deletes GeoMatchConstraint objects in a GeoMatchSet. For each +// GeoMatchConstraint object, you specify the following values: // -// * Action: Whether to insert the object into or delete the object from -// the array. To change a XssMatchTuple, you delete the existing object and +// * Whether to insert or delete the object from the array. If you want to +// change a GeoMatchConstraint object, you delete the existing object and // add a new one. // -// * FieldToMatch: The part of web requests that you want AWS WAF to inspect -// and, if you want AWS WAF to inspect a header, the name of the header. -// -// * TextTransformation: Which text transformation, if any, to perform on -// the web request before inspecting the request for cross-site scripting -// attacks. +// * The Type. The only valid value for Type is Country. // -// You use XssMatchSet objects to specify which CloudFront requests you want -// to allow, block, or count. For example, if you're receiving requests that -// contain cross-site scripting attacks in the request body and you want to -// block the requests, you can create an XssMatchSet with the applicable settings, -// and then configure AWS WAF to block the requests. +// * The Value, which is a two-character code for the country to add to the +// GeoMatchConstraint object. Valid codes are listed in GeoMatchConstraint$Value. // -// To create and configure an XssMatchSet, perform the following steps: +// To create and configure a GeoMatchSet, perform the following steps: // -// Submit a CreateXssMatchSet request. +// Submit a CreateGeoMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateIPSet request. +// parameter of an UpdateGeoMatchSet request. // -// Submit an UpdateXssMatchSet request to specify the parts of web requests -// that you want AWS WAF to inspect for cross-site scripting attacks. +// Submit an UpdateGeoMatchSet request to specify the country that you want +// AWS WAF to watch for. +// +// When you update a GeoMatchSet, you specify the country that you want to +// add and/or the country that you want to delete. If you want to change a country, +// you delete the existing country and add the new one.
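// Concretely, the insert step above might look like the following; the
// GeoMatchSetId is a placeholder, and token comes from GetChangeToken as in
// the UpdateByteMatchSet sketch:
//
//	_, err = svc.UpdateGeoMatchSet(&waf.UpdateGeoMatchSetInput{
//		GeoMatchSetId: aws.String("example-geo-match-set-id"), // placeholder
//		ChangeToken:   token.ChangeToken,
//		Updates: []*waf.GeoMatchSetUpdate{{
//			Action: aws.String(waf.ChangeActionInsert),
//			GeoMatchConstraint: &waf.GeoMatchConstraint{
//				Type:  aws.String(waf.GeoMatchConstraintTypeCountry), // "Country" is the only valid Type
//				Value: aws.String("US"),                              // two-character country code
//			},
//		}},
//	})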
// // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -5550,9 +5548,13 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // the error. // // See the AWS API reference guide for AWS WAF's -// API operation UpdateXssMatchSet for usage and error information. +// API operation UpdateGeoMatchSet for usage and error information. // // Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeInternalErrorException "InternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -5603,7 +5605,7 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -5630,9 +5632,13 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // * ErrCodeNonexistentItemException "NonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// * ErrCodeStaleDataException "StaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. // // * ErrCodeLimitsExceededException "LimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of @@ -5640,108 +5646,4882 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSet -func (c *WAF) UpdateXssMatchSet(input *UpdateXssMatchSetInput) (*UpdateXssMatchSetOutput, error) { - req, out := c.UpdateXssMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateGeoMatchSet +func (c *WAF) UpdateGeoMatchSet(input *UpdateGeoMatchSetInput) (*UpdateGeoMatchSetOutput, error) { + req, out := c.UpdateGeoMatchSetRequest(input) return out, req.Send() } -// UpdateXssMatchSetWithContext is the same as UpdateXssMatchSet with the addition of +// UpdateGeoMatchSetWithContext is the same as UpdateGeoMatchSet with the addition of // the ability to pass a context and additional request options. // -// See UpdateXssMatchSet for details on how to use this API operation. +// See UpdateGeoMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *WAF) UpdateXssMatchSetWithContext(ctx aws.Context, input *UpdateXssMatchSetInput, opts ...request.Option) (*UpdateXssMatchSetOutput, error) { - req, out := c.UpdateXssMatchSetRequest(input) +func (c *WAF) UpdateGeoMatchSetWithContext(ctx aws.Context, input *UpdateGeoMatchSetInput, opts ...request.Option) (*UpdateGeoMatchSetOutput, error) { + req, out := c.UpdateGeoMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// The ActivatedRule object in an UpdateWebACL request specifies a Rule that -// you want to insert or delete, the priority of the Rule in the WebACL, and -// the action that you want AWS WAF to take when a web request matches the Rule -// (ALLOW, BLOCK, or COUNT). -// -// To specify whether to insert or delete a Rule, use the Action parameter in -// the WebACLUpdate data type. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ActivatedRule -type ActivatedRule struct { - _ struct{} `type:"structure"` +const opUpdateIPSet = "UpdateIPSet" - // Specifies the action that CloudFront or AWS WAF takes when a web request - // matches the conditions in the Rule. Valid values for Action include the following: - // - // * ALLOW: CloudFront responds with the requested object. - // - // * BLOCK: CloudFront responds with an HTTP 403 (Forbidden) status code. - // - // * COUNT: AWS WAF increments a counter of requests that match the conditions - // in the rule and then continues to inspect the web request based on the +// UpdateIPSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIPSet operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateIPSet for more information on using the UpdateIPSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateIPSetRequest method. +// req, resp := client.UpdateIPSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSet +func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, output *UpdateIPSetOutput) { + op := &request.Operation{ + Name: opUpdateIPSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateIPSetInput{} + } + + output = &UpdateIPSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateIPSet API operation for AWS WAF. +// +// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor +// object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change an IPSetDescriptor object, you delete the existing object and add +// a new one. +// +// * The IP address version, IPv4 or IPv6. +// +// * The IP address in CIDR notation, for example, 192.0.2.0/24 (for the +// range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 +// (for the individual IP address 192.0.2.44). 
+// +// AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, +// /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, +// see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// IPv6 addresses can be represented using any of the following formats: +// +// * 1111:0000:0000:0000:0000:0000:0000:0111/128 +// +// * 1111:0:0:0:0:0:0:0111/128 +// +// * 1111::0111/128 +// +// * 1111::111/128 +// +// You use an IPSet to specify which web requests you want to allow or block +// based on the IP addresses that the requests originated from. For example, +// if you're receiving a lot of requests from one or a small number of IP addresses +// and you want to block the requests, you can create an IPSet that specifies +// those IP addresses, and then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Submit a CreateIPSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateIPSet request. +// +// Submit an UpdateIPSet request to specify the IP addresses that you want AWS +// WAF to watch for. +// +// When you update an IPSet, you specify the IP addresses that you want to add +// and/or the IP addresses that you want to delete. If you want to change an +// IP address, you delete the existing IP address and add the new one. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation UpdateIPSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. 
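// A sketch of the IPv4 case above: inserting a single /32 descriptor into an
// existing IPSet (placeholder IPSetId; token from GetChangeToken as in the
// earlier sketches):
//
//	_, err = svc.UpdateIPSet(&waf.UpdateIPSetInput{
//		IPSetId:     aws.String("example-ipset-id"), // placeholder
//		ChangeToken: token.ChangeToken,
//		Updates: []*waf.IPSetUpdate{{
//			Action: aws.String(waf.ChangeActionInsert),
//			IPSetDescriptor: &waf.IPSetDescriptor{
//				Type:  aws.String(waf.IPSetDescriptorTypeIpv4),
//				Value: aws.String("192.0.2.44/32"), // /32 = the single address 192.0.2.44
//			},
//		}},
//	})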
+// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentContainerException "NonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSet +func (c *WAF) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + return out, req.Send() +} + +// UpdateIPSetWithContext is the same as UpdateIPSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateIPSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) UpdateIPSetWithContext(ctx aws.Context, input *UpdateIPSetInput, opts ...request.Option) (*UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
+const opUpdateRateBasedRule = "UpdateRateBasedRule"
+
+// UpdateRateBasedRuleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRateBasedRule operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRateBasedRule for more information on using the UpdateRateBasedRule
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateRateBasedRuleRequest method.
+// req, resp := client.UpdateRateBasedRuleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRule
+func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req *request.Request, output *UpdateRateBasedRuleOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRateBasedRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateRateBasedRuleInput{}
+	}
+
+	output = &UpdateRateBasedRuleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRateBasedRule API operation for AWS WAF.
+//
+// Inserts or deletes Predicate objects in a rule and updates the RateLimit
+// in the rule.
+//
+// Each Predicate object identifies a predicate, such as a ByteMatchSet or an
+// IPSet, that specifies the web requests that you want to block or count. The
+// RateLimit specifies the number of requests every five minutes that triggers
+// the rule.
+//
+// If you add more than one predicate to a RateBasedRule, a request must match
+// all the predicates and exceed the RateLimit to be counted or blocked. For
+// example, suppose you add the following to a RateBasedRule:
+//
+// * An IPSet that matches the IP address 192.0.2.44/32
+//
+// * A ByteMatchSet that matches BadBot in the User-Agent header
+//
+// Further, you specify a RateLimit of 15,000.
+//
+// You then add the RateBasedRule to a WebACL and specify that you want to block
+// requests that satisfy the rule. For a request to be blocked, it must come
+// from the IP address 192.0.2.44 and the User-Agent header in the request must
+// contain the value BadBot. Further, requests that match these two conditions
+// must be received at a rate of more than 15,000 every five minutes. If the
+// rate drops below this limit, AWS WAF no longer blocks the requests.
+//
+// As a second example, suppose you want to limit requests to a particular page
+// on your site. To do this, you could add the following to a RateBasedRule:
+//
+// * A ByteMatchSet with FieldToMatch of URI
+//
+// * A PositionalConstraint of STARTS_WITH
+//
+// * A TargetString of login
+//
+// Further, you specify a RateLimit of 15,000.
+//
+// By adding this RateBasedRule to a WebACL, you could limit requests to your
+// login page without affecting the rest of your site.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
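The documented scenario translates almost directly into an UpdateRateBasedRule call. A hedged sketch, assuming an existing rule and IPSet whose IDs are placeholders, with the awserr type assertion used as just described:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/waf"
)

// addRatePredicate rate-limits requests that match an existing IPSet,
// mirroring the example in the generated documentation above.
func addRatePredicate(svc *waf.WAF, ruleID, ipSetID string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		RuleId:      aws.String(ruleID),
		RateLimit:   aws.Int64(15000), // requests per five-minute window
		Updates: []*waf.RuleUpdate{{
			Action: aws.String("INSERT"),
			Predicate: &waf.Predicate{
				DataId:  aws.String(ipSetID),
				Negated: aws.Bool(false),
				Type:    aws.String("IPMatch"),
			},
		}},
	})
	// Inspect service errors via the awserr type assertion described above.
	if aerr, ok := err.(awserr.Error); ok {
		log.Printf("%s: %s", aerr.Code(), aerr.Message())
	}
	return err
}
```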
+// +// See the AWS API reference guide for AWS WAF's +// API operation UpdateRateBasedRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentContainerException "NonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. 
For example:
+//
+// * You tried to delete a ByteMatchSet that is still referenced by a Rule.
+//
+// * You tried to delete a Rule that is still referenced by a WebACL.
+//
+// * ErrCodeLimitsExceededException "LimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRule
+func (c *WAF) UpdateRateBasedRule(input *UpdateRateBasedRuleInput) (*UpdateRateBasedRuleOutput, error) {
+	req, out := c.UpdateRateBasedRuleRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRateBasedRuleWithContext is the same as UpdateRateBasedRule with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRateBasedRule for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateRateBasedRuleWithContext(ctx aws.Context, input *UpdateRateBasedRuleInput, opts ...request.Option) (*UpdateRateBasedRuleOutput, error) {
+	req, out := c.UpdateRateBasedRuleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateRegexMatchSet = "UpdateRegexMatchSet"
+
+// UpdateRegexMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRegexMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRegexMatchSet for more information on using the UpdateRegexMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateRegexMatchSetRequest method.
+// req, resp := client.UpdateRegexMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexMatchSet
+func (c *WAF) UpdateRegexMatchSetRequest(input *UpdateRegexMatchSetInput) (req *request.Request, output *UpdateRegexMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRegexMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateRegexMatchSetInput{}
+	}
+
+	output = &UpdateRegexMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRegexMatchSet API operation for AWS WAF.
+//
+// Inserts or deletes RegexMatchSetUpdate objects (filters) in a RegexMatchSet.
+// For each RegexMatchSetUpdate object, you specify the following values:
+//
+// * Whether to insert or delete the object from the array. If you want to
+// change a RegexMatchSetUpdate object, you delete the existing object and
+// add a new one.
+//
+// * The part of a web request that you want AWS WAF to inspect, such as
+// a query string or the value of the User-Agent header.
+//
+// * The identifier of the pattern (a regular expression) that you want AWS
+// WAF to look for. For more information, see RegexPatternSet.
+//
+// * Whether to perform any conversions on the request, such as converting
+// it to lowercase, before inspecting it for the specified string.
+//
+// For example, you can create a RegexPatternSet that matches any requests with
+// User-Agent headers that contain the string B[a@]dB[o0]t. You can then configure
+// AWS WAF to reject those requests.
+//
+// To create and configure a RegexMatchSet, perform the following steps:
+//
+// Create a RegexMatchSet. For more information, see CreateRegexMatchSet.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateRegexMatchSet request.
+//
+// Submit an UpdateRegexMatchSet request to specify the part of the request
+// that you want AWS WAF to inspect (for example, the header or the URI) and
+// the identifier of the RegexPatternSet that contains the regular expression
+// patterns you want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF's
+// API operation UpdateRegexMatchSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeStaleDataException "StaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeInternalErrorException "InternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeDisallowedNameException "DisallowedNameException"
+// The name specified is invalid.
+//
+// * ErrCodeLimitsExceededException "LimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// * ErrCodeNonexistentItemException "NonexistentItemException"
+// The operation failed because the referenced object doesn't exist.
+//
+// * ErrCodeNonexistentContainerException "NonexistentContainerException"
+// The operation failed because you tried to add an object to or delete an object
+// from another object that doesn't exist. For example:
+//
+// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't
+// exist.
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// The operation failed because there was nothing to do.
For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL.
+//
+// * You tried to add a Rule to a WebACL, but the Rule already exists in
+// the specified WebACL.
+//
+// * You tried to add an IP address to an IPSet, but the IP address already
+// exists in the specified IPSet.
+//
+// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple
+// already exists in the specified WebACL.
+//
+// * ErrCodeInvalidAccountException "InvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexMatchSet
+func (c *WAF) UpdateRegexMatchSet(input *UpdateRegexMatchSetInput) (*UpdateRegexMatchSetOutput, error) {
+	req, out := c.UpdateRegexMatchSetRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRegexMatchSetWithContext is the same as UpdateRegexMatchSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRegexMatchSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateRegexMatchSetWithContext(ctx aws.Context, input *UpdateRegexMatchSetInput, opts ...request.Option) (*UpdateRegexMatchSetOutput, error) {
+	req, out := c.UpdateRegexMatchSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateRegexPatternSet = "UpdateRegexPatternSet"
+
+// UpdateRegexPatternSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRegexPatternSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRegexPatternSet for more information on using the UpdateRegexPatternSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateRegexPatternSetRequest method.
+// req, resp := client.UpdateRegexPatternSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexPatternSet
+func (c *WAF) UpdateRegexPatternSetRequest(input *UpdateRegexPatternSetInput) (req *request.Request, output *UpdateRegexPatternSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRegexPatternSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateRegexPatternSetInput{}
+	}
+
+	output = &UpdateRegexPatternSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRegexPatternSet API operation for AWS WAF.
+//
+// Inserts or deletes RegexPatternString objects in a RegexPatternSet. For
+// each RegexPatternString object, you specify the following values:
+//
+// * Whether to insert or delete the object from the array. If you want to
+// change a RegexPatternString, you delete the existing one and add a new
+// one.
+//
+// * The regular expression pattern that you want AWS WAF to look for. For
+// more information, see RegexPatternSet.
+//
+// For example, you can create a RegexPatternString such as B[a@]dB[o0]t. AWS
+// WAF will match this RegexPatternString to:
+//
+// * BadBot
+//
+// * BadB0t
+//
+// * B@dBot
+//
+// * B@dB0t
+//
+// To create and configure a RegexPatternSet, perform the following steps:
+//
+// Create a RegexPatternSet. For more information, see CreateRegexPatternSet.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateRegexPatternSet request.
+//
+// Submit an UpdateRegexPatternSet request to specify the regular expression
+// pattern that you want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF's
+// API operation UpdateRegexPatternSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeStaleDataException "StaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeInternalErrorException "InternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeLimitsExceededException "LimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// * ErrCodeNonexistentContainerException "NonexistentContainerException"
+// The operation failed because you tried to add an object to or delete an object
+// from another object that doesn't exist. For example:
+//
+// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't
+// exist.
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL.
+//
+// * You tried to add a Rule to a WebACL, but the Rule already exists in
+// the specified WebACL.
+//
+// * You tried to add an IP address to an IPSet, but the IP address already
+// exists in the specified IPSet.
+//
+// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple
+// already exists in the specified WebACL.
+//
+// * ErrCodeInvalidAccountException "InvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeInvalidRegexPatternException "InvalidRegexPatternException"
+// The regular expression (regex) you specified in RegexPatternString is invalid.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexPatternSet
+func (c *WAF) UpdateRegexPatternSet(input *UpdateRegexPatternSetInput) (*UpdateRegexPatternSetOutput, error) {
+	req, out := c.UpdateRegexPatternSetRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRegexPatternSetWithContext is the same as UpdateRegexPatternSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRegexPatternSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateRegexPatternSetWithContext(ctx aws.Context, input *UpdateRegexPatternSetInput, opts ...request.Option) (*UpdateRegexPatternSetOutput, error) {
+	req, out := c.UpdateRegexPatternSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateRule = "UpdateRule"
+
+// UpdateRuleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRule operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRule for more information on using the UpdateRule
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateRuleRequest method.
+// req, resp := client.UpdateRuleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRule
+func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, output *UpdateRuleOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateRuleInput{}
+	}
+
+	output = &UpdateRuleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRule API operation for AWS WAF.
+//
+// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies
+// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests
+// that you want to allow, block, or count. If you add more than one predicate
+// to a Rule, a request must match all of the specifications to be allowed,
+// blocked, or counted. For example, suppose you add the following to a Rule:
+//
+// * A ByteMatchSet that matches the value BadBot in the User-Agent header
+//
+// * An IPSet that matches the IP address 192.0.2.44
+//
+// You then add the Rule to a WebACL and specify that you want to block requests
+// that satisfy the Rule. For a request to be blocked, the User-Agent header
+// in the request must contain the value BadBot and the request must originate
+// from the IP address 192.0.2.44.
+//
+// To create and configure a Rule, perform the following steps:
+//
+// Create and update the predicates that you want to include in the Rule.
+//
+// Create the Rule. See CreateRule.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateRule request.
+//
+// Submit an UpdateRule request to add predicates to the Rule.
+//
+// Create and update a WebACL that contains the Rule. See CreateWebACL.
+//
+// If you want to replace one ByteMatchSet or IPSet with another, you delete
+// the existing one and add the new one.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF's
+// API operation UpdateRule for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeStaleDataException "StaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeInternalErrorException "InternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeInvalidAccountException "InvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentContainerException "NonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeReferencedItemException "ReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRule +func (c *WAF) UpdateRule(input *UpdateRuleInput) (*UpdateRuleOutput, error) { + req, out := c.UpdateRuleRequest(input) + return out, req.Send() +} + +// UpdateRuleWithContext is the same as UpdateRule with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
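A small usage sketch of the context plumbing described above, assuming an already-populated *waf.UpdateRuleInput; the standard library's context.Context satisfies aws.Context on Go 1.7 and later:

```go
import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/service/waf"
)

// updateRuleWithTimeout cancels the UpdateRule call if it takes longer than
// 30 seconds. Passing a nil context would panic, per the note above.
func updateRuleWithTimeout(svc *waf.WAF, input *waf.UpdateRuleInput) (*waf.UpdateRuleOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return svc.UpdateRuleWithContext(ctx, input)
}
```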
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateRuleWithContext(ctx aws.Context, input *UpdateRuleInput, opts ...request.Option) (*UpdateRuleOutput, error) {
+	req, out := c.UpdateRuleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet"
+
+// UpdateSizeConstraintSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSizeConstraintSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateSizeConstraintSet for more information on using the UpdateSizeConstraintSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateSizeConstraintSetRequest method.
+// req, resp := client.UpdateSizeConstraintSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSet
+func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput) (req *request.Request, output *UpdateSizeConstraintSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateSizeConstraintSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateSizeConstraintSetInput{}
+	}
+
+	output = &UpdateSizeConstraintSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateSizeConstraintSet API operation for AWS WAF.
+//
+// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet.
+// For each SizeConstraint object, you specify the following values:
+//
+// * Whether to insert or delete the object from the array. If you want to
+// change a SizeConstraintSetUpdate object, you delete the existing object
+// and add a new one.
+//
+// * The part of a web request that you want AWS WAF to evaluate, such as
+// the length of a query string or the length of the User-Agent header.
+//
+// * Whether to perform any transformations on the request, such as converting
+// it to lowercase, before checking its length. Note that transformations
+// of the request body are not supported because the AWS resource forwards
+// only the first 8192 bytes of your request to AWS WAF.
+//
+// * A ComparisonOperator used for evaluating the selected part of the request
+// against the specified Size, such as equals, greater than, less than, and
+// so on.
+//
+// * The length, in bytes, that you want AWS WAF to watch for in the selected
+// part of the request. The length is computed after applying the transformation.
+//
+// For example, you can add a SizeConstraintSetUpdate object that matches web
+// requests in which the length of the User-Agent header is greater than 100
+// bytes. You can then configure AWS WAF to block those requests.
+//
+// To create and configure a SizeConstraintSet, perform the following steps:
+//
+// Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.
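The remaining steps continue below; once a change token is in hand, the update itself mirrors the documented User-Agent example. A hedged sketch with a placeholder set ID:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// addSizeConstraint flags requests whose User-Agent header is longer than
// 100 bytes, as in the example in the generated documentation above.
func addSizeConstraint(svc *waf.WAF, setID string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSizeConstraintSet(&waf.UpdateSizeConstraintSetInput{
		ChangeToken:         token.ChangeToken,
		SizeConstraintSetId: aws.String(setID), // placeholder ID
		Updates: []*waf.SizeConstraintSetUpdate{{
			Action: aws.String("INSERT"),
			SizeConstraint: &waf.SizeConstraint{
				ComparisonOperator: aws.String("GT"),
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String("HEADER"), Data: aws.String("User-Agent")},
				Size:               aws.Int64(100),
				TextTransformation: aws.String("NONE"),
			},
		}},
	})
	return err
}
```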
+// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateSizeConstraintSet request. +// +// Submit an UpdateSizeConstraintSet request to specify the part of the request +// that you want AWS WAF to inspect (for example, the header or the URI) and +// the value that you want AWS WAF to watch for. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation UpdateSizeConstraintSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentContainerException "NonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. 
For example:
+//
+// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't
+// exist.
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeNonexistentItemException "NonexistentItemException"
+// The operation failed because the referenced object doesn't exist.
+//
+// * ErrCodeReferencedItemException "ReferencedItemException"
+// The operation failed because you tried to delete an object that is still
+// in use. For example:
+//
+// * You tried to delete a ByteMatchSet that is still referenced by a Rule.
+//
+// * You tried to delete a Rule that is still referenced by a WebACL.
+//
+// * ErrCodeLimitsExceededException "LimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSet
+func (c *WAF) UpdateSizeConstraintSet(input *UpdateSizeConstraintSetInput) (*UpdateSizeConstraintSetOutput, error) {
+	req, out := c.UpdateSizeConstraintSetRequest(input)
+	return out, req.Send()
+}
+
+// UpdateSizeConstraintSetWithContext is the same as UpdateSizeConstraintSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateSizeConstraintSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateSizeConstraintSetWithContext(ctx aws.Context, input *UpdateSizeConstraintSetInput, opts ...request.Option) (*UpdateSizeConstraintSetOutput, error) {
+	req, out := c.UpdateSizeConstraintSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet"
+
+// UpdateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSqlInjectionMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateSqlInjectionMatchSet for more information on using the UpdateSqlInjectionMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateSqlInjectionMatchSetRequest method.
+// req, resp := client.UpdateSqlInjectionMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSet
+func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSetInput) (req *request.Request, output *UpdateSqlInjectionMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateSqlInjectionMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateSqlInjectionMatchSetInput{}
+	}
+
+	output = &UpdateSqlInjectionMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateSqlInjectionMatchSet API operation for AWS WAF.
+//
+// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet.
+// For each SqlInjectionMatchTuple object, you specify the following values:
+//
+// * Action: Whether to insert the object into or delete the object from
+// the array. To change a SqlInjectionMatchTuple, you delete the existing
+// object and add a new one.
+//
+// * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+// and, if you want AWS WAF to inspect a header, the name of the header.
+//
+// * TextTransformation: Which text transformation, if any, to perform on
+// the web request before inspecting the request for snippets of malicious
+// SQL code.
+//
+// You use SqlInjectionMatchSet objects to specify which CloudFront requests
+// you want to allow, block, or count. For example, if you're receiving requests
+// that contain snippets of SQL code in the query string and you want to block
+// the requests, you can create a SqlInjectionMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure a SqlInjectionMatchSet, perform the following steps:
+//
+// Submit a CreateSqlInjectionMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateSqlInjectionMatchSet request.
+//
+// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web
+// requests that you want AWS WAF to inspect for snippets of SQL code.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF's
+// API operation UpdateSqlInjectionMatchSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeInvalidAccountException "InvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentContainerException "NonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSet +func (c *WAF) UpdateSqlInjectionMatchSet(input *UpdateSqlInjectionMatchSetInput) (*UpdateSqlInjectionMatchSetOutput, error) { + req, out := c.UpdateSqlInjectionMatchSetRequest(input) + return out, req.Send() +} + +// UpdateSqlInjectionMatchSetWithContext is the same as UpdateSqlInjectionMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSqlInjectionMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
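Pulling the pieces above together, a hedged sketch of the query-string scenario from the operation description; the match-set ID is a placeholder:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// watchQueryStringForSQLi inspects URL-decoded query strings for SQL
// injection, as in the scenario described in the documentation above.
func watchQueryStringForSQLi(svc *waf.WAF, setID string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSqlInjectionMatchSet(&waf.UpdateSqlInjectionMatchSetInput{
		ChangeToken:            token.ChangeToken,
		SqlInjectionMatchSetId: aws.String(setID), // placeholder ID
		Updates: []*waf.SqlInjectionMatchSetUpdate{{
			Action: aws.String("INSERT"),
			SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
				TextTransformation: aws.String("URL_DECODE"),
			},
		}},
	})
	return err
}
```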
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateSqlInjectionMatchSetWithContext(ctx aws.Context, input *UpdateSqlInjectionMatchSetInput, opts ...request.Option) (*UpdateSqlInjectionMatchSetOutput, error) {
+	req, out := c.UpdateSqlInjectionMatchSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateWebACL = "UpdateWebACL"
+
+// UpdateWebACLRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateWebACL operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateWebACL for more information on using the UpdateWebACL
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateWebACLRequest method.
+// req, resp := client.UpdateWebACLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACL
+func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Request, output *UpdateWebACLOutput) {
+	op := &request.Operation{
+		Name:       opUpdateWebACL,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateWebACLInput{}
+	}
+
+	output = &UpdateWebACLOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateWebACL API operation for AWS WAF.
+//
+// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies
+// web requests that you want to allow, block, or count. When you update a WebACL,
+// you specify the following values:
+//
+// * A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs
+// the default action if a request doesn't match the criteria in any of the
+// Rules in a WebACL.
+//
+// * The Rules that you want to add and/or delete. If you want to replace
+// one Rule with another, you delete the existing Rule and add the new one.
+//
+// * For each Rule, whether you want AWS WAF to allow requests, block requests,
+// or count requests that match the conditions in the Rule.
+//
+// * The order in which you want AWS WAF to evaluate the Rules in a WebACL.
+// If you add more than one Rule to a WebACL, AWS WAF evaluates each request
+// against the Rules in order based on the value of Priority. (The Rule that
+// has the lowest value for Priority is evaluated first.) When a web request
+// matches all of the predicates (such as ByteMatchSets and IPSets) in a
+// Rule, AWS WAF immediately takes the corresponding action, allow or block,
+// and doesn't evaluate the request against the remaining Rules in the WebACL,
+// if any.
+//
+// To create and configure a WebACL, perform the following steps:
+//
+// Create and update the predicates that you want to include in Rules. For more
+// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet,
+// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.
+//
+// Create and update the Rules that you want to include in the WebACL.
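Once those steps (continued below) are done, inserting a Rule into the WebACL is a single call. A sketch with placeholder IDs, assuming the Rule should BLOCK at priority 1:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// activateRule inserts an existing Rule into a WebACL at priority 1 with a
// BLOCK action. Both IDs are placeholders.
func activateRule(svc *waf.WAF, aclID, ruleID string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateWebACL(&waf.UpdateWebACLInput{
		ChangeToken: token.ChangeToken,
		WebACLId:    aws.String(aclID),
		Updates: []*waf.WebACLUpdate{{
			Action: aws.String("INSERT"),
			ActivatedRule: &waf.ActivatedRule{
				Action:   &waf.WafAction{Type: aws.String("BLOCK")},
				Priority: aws.Int64(1),
				RuleId:   aws.String(ruleID),
			},
		}},
	})
	return err
}
```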
For more +// information, see CreateRule and UpdateRule. +// +// Create a WebACL. See CreateWebACL. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. +// +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. +// +// Be aware that if you try to add a RATE_BASED rule to a web ACL without setting +// the rule type when first creating the rule, the UpdateWebACL request will +// fail because the request tries to add a REGULAR rule (the default rule type) +// with the specified ID, which does not exist. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation UpdateWebACL for usage and error information. +// +// Returned Error Codes: +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeInternalErrorException "InternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeInvalidAccountException "InvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeInvalidOperationException "InvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. 
+//
+// * Your request references an ARN that is malformed, or corresponds to
+// a resource with which a web ACL cannot be associated.
+//
+// * ErrCodeNonexistentContainerException "NonexistentContainerException"
+// The operation failed because you tried to add an object to or delete an object
+// from another object that doesn't exist. For example:
+//
+// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't
+// exist.
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeNonexistentItemException "NonexistentItemException"
+// The operation failed because the referenced object doesn't exist.
+//
+// * ErrCodeReferencedItemException "ReferencedItemException"
+// The operation failed because you tried to delete an object that is still
+// in use. For example:
+//
+// * You tried to delete a ByteMatchSet that is still referenced by a Rule.
+//
+// * You tried to delete a Rule that is still referenced by a WebACL.
+//
+// * ErrCodeLimitsExceededException "LimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACL
+func (c *WAF) UpdateWebACL(input *UpdateWebACLInput) (*UpdateWebACLOutput, error) {
+	req, out := c.UpdateWebACLRequest(input)
+	return out, req.Send()
+}
+
+// UpdateWebACLWithContext is the same as UpdateWebACL with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateWebACL for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAF) UpdateWebACLWithContext(ctx aws.Context, input *UpdateWebACLInput, opts ...request.Option) (*UpdateWebACLOutput, error) {
+	req, out := c.UpdateWebACLRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateXssMatchSet = "UpdateXssMatchSet"
+
+// UpdateXssMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateXssMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateXssMatchSet for more information on using the UpdateXssMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateXssMatchSetRequest method.
+// req, resp := client.UpdateXssMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSet
+func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *request.Request, output *UpdateXssMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateXssMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &UpdateXssMatchSetInput{}
+	}
+
+	output = &UpdateXssMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateXssMatchSet API operation for AWS WAF.
+//
+// Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For
+// each XssMatchTuple object, you specify the following values:
+//
+// * Action: Whether to insert the object into or delete the object from
+// the array. To change an XssMatchTuple, you delete the existing object and
+// add a new one.
+//
+// * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+// and, if you want AWS WAF to inspect a header, the name of the header.
+//
+// * TextTransformation: Which text transformation, if any, to perform on
+// the web request before inspecting the request for cross-site scripting
+// attacks.
+//
+// You use XssMatchSet objects to specify which CloudFront requests you want
+// to allow, block, or count. For example, if you're receiving requests that
+// contain cross-site scripting attacks in the request body and you want to
+// block the requests, you can create an XssMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure an XssMatchSet, perform the following steps:
+//
+// Submit a CreateXssMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateXssMatchSet request.
+//
+// Submit an UpdateXssMatchSet request to specify the parts of web requests
+// that you want AWS WAF to inspect for cross-site scripting attacks.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF's
+// API operation UpdateXssMatchSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalErrorException "InternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeInvalidAccountException "InvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeInvalidOperationException "InvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL.
+// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeNonexistentContainerException "NonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeNonexistentItemException "NonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeStaleDataException "StaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeLimitsExceededException "LimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSet +func (c *WAF) UpdateXssMatchSet(input *UpdateXssMatchSetInput) (*UpdateXssMatchSetOutput, error) { + req, out := c.UpdateXssMatchSetRequest(input) + return out, req.Send() +} + +// UpdateXssMatchSetWithContext is the same as UpdateXssMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateXssMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
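+//
+// The following is an illustrative sketch, not generated SDK documentation.
+// It assumes "svc" is an already-configured *waf.WAF client and shows how a
+// deadline can be attached to the call; the set ID and updates are hypothetical:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// out, err := svc.UpdateXssMatchSetWithContext(ctx, &waf.UpdateXssMatchSetInput{
+//     ChangeToken:   token,                        // from a prior GetChangeToken call
+//     XssMatchSetId: aws.String("example-set-id"), // hypothetical ID
+//     Updates:       updates,                      // []*waf.XssMatchSetUpdate
+// })
+// if err != nil {
+//     // a cancelled or expired context surfaces here as a send error
+// }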
+func (c *WAF) UpdateXssMatchSetWithContext(ctx aws.Context, input *UpdateXssMatchSetInput, opts ...request.Option) (*UpdateXssMatchSetOutput, error) {
+	req, out := c.UpdateXssMatchSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// The ActivatedRule object in an UpdateWebACL request specifies a Rule that
+// you want to insert or delete, the priority of the Rule in the WebACL, and
+// the action that you want AWS WAF to take when a web request matches the Rule
+// (ALLOW, BLOCK, or COUNT).
+//
+// To specify whether to insert or delete a Rule, use the Action parameter in
+// the WebACLUpdate data type.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ActivatedRule
+type ActivatedRule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the action that CloudFront or AWS WAF takes when a web request
+	// matches the conditions in the Rule. Valid values for Action include the following:
+	//
+	// * ALLOW: CloudFront responds with the requested object.
+	//
+	// * BLOCK: CloudFront responds with an HTTP 403 (Forbidden) status code.
+	//
+	// * COUNT: AWS WAF increments a counter of requests that match the conditions
+	// in the rule and then continues to inspect the web request based on the
	// remaining rules in the web ACL.
	//
	// Action is a required field
-	Action *WafAction `type:"structure" required:"true"`
+	Action *WafAction `type:"structure" required:"true"`
+
+	// Specifies the order in which the Rules in a WebACL are evaluated. Rules with
+	// a lower value for Priority are evaluated before Rules with a higher value.
+	// The value must be a unique integer. If you add multiple Rules to a WebACL,
+	// the values don't need to be consecutive.
+	//
+	// Priority is a required field
+	Priority *int64 `type:"integer" required:"true"`
+
+	// The RuleId for a Rule. You use RuleId to get more information about a Rule
+	// (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL
+	// or delete one from a WebACL (see UpdateWebACL), or delete a Rule from AWS
+	// WAF (see DeleteRule).
+	//
+	// RuleId is returned by CreateRule and by ListRules.
+	//
+	// RuleId is a required field
+	RuleId *string `min:"1" type:"string" required:"true"`
+
+	// The rule type, either REGULAR, as defined by Rule, or RATE_BASED, as defined
+	// by RateBasedRule. The default is REGULAR. Although this field is optional,
+	// be aware that if you try to add a RATE_BASED rule to a web ACL without setting
+	// the type, the UpdateWebACL request will fail because the request tries to
+	// add a REGULAR rule with the specified ID, which does not exist.
+	Type *string `type:"string" enum:"WafRuleType"`
+}
+
+// String returns the string representation
+func (s ActivatedRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ActivatedRule) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
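+//
+// As an illustrative sketch (not generated SDK documentation), a minimal
+// ActivatedRule that passes Validate can be built with the fluent setters;
+// the RuleId shown is hypothetical:
+//
+// rule := (&waf.ActivatedRule{}).
+//     SetAction(&waf.WafAction{Type: aws.String("BLOCK")}).
+//     SetPriority(1).
+//     SetRuleId("example-rule-id")
+// if err := rule.Validate(); err != nil {
+//     // any missing required field is reported here
+// }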
+func (s *ActivatedRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActivatedRule"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Priority == nil { + invalidParams.Add(request.NewErrParamRequired("Priority")) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *ActivatedRule) SetAction(v *WafAction) *ActivatedRule { + s.Action = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ActivatedRule) SetPriority(v int64) *ActivatedRule { + s.Priority = &v + return s +} + +// SetRuleId sets the RuleId field's value. +func (s *ActivatedRule) SetRuleId(v string) *ActivatedRule { + s.RuleId = &v + return s +} + +// SetType sets the Type field's value. +func (s *ActivatedRule) SetType(v string) *ActivatedRule { + s.Type = &v + return s +} + +// In a GetByteMatchSet request, ByteMatchSet is a complex type that contains +// the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified +// when you updated the ByteMatchSet. +// +// A complex type that contains ByteMatchTuple objects, which specify the parts +// of web requests that you want AWS WAF to inspect and the values that you +// want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple +// object, a request needs to match the settings in only one ByteMatchTuple +// to be considered a match. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchSet +type ByteMatchSet struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information + // about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet), + // insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet). + // + // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets. + // + // ByteMatchSetId is a required field + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the bytes (typically a string that corresponds with ASCII characters) + // that you want AWS WAF to search for in web requests, the location in requests + // that you want AWS WAF to search, and other settings. + // + // ByteMatchTuples is a required field + ByteMatchTuples []*ByteMatchTuple `type:"list" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ByteMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSet) GoString() string { + return s.String() +} + +// SetByteMatchSetId sets the ByteMatchSetId field's value. +func (s *ByteMatchSet) SetByteMatchSetId(v string) *ByteMatchSet { + s.ByteMatchSetId = &v + return s +} + +// SetByteMatchTuples sets the ByteMatchTuples field's value. 
+func (s *ByteMatchSet) SetByteMatchTuples(v []*ByteMatchTuple) *ByteMatchSet { + s.ByteMatchTuples = v + return s +} + +// SetName sets the Name field's value. +func (s *ByteMatchSet) SetName(v string) *ByteMatchSet { + s.Name = &v + return s +} + +// Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the +// Name and ByteMatchSetId for one ByteMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchSetSummary +type ByteMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information + // about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a + // Rule, and delete a ByteMatchSet from AWS WAF. + // + // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets. + // + // ByteMatchSetId is a required field + ByteMatchSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ByteMatchSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSetSummary) GoString() string { + return s.String() +} + +// SetByteMatchSetId sets the ByteMatchSetId field's value. +func (s *ByteMatchSetSummary) SetByteMatchSetId(v string) *ByteMatchSetSummary { + s.ByteMatchSetId = &v + return s +} + +// SetName sets the Name field's value. +func (s *ByteMatchSetSummary) SetName(v string) *ByteMatchSetSummary { + s.Name = &v + return s +} + +// In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to +// insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchSetUpdate +type ByteMatchSetUpdate struct { + _ struct{} `type:"structure"` + + // Specifies whether to insert or delete a ByteMatchTuple. + // + // Action is a required field + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // Information about the part of a web request that you want AWS WAF to inspect + // and the value that you want AWS WAF to search for. If you specify DELETE + // for the value of Action, the ByteMatchTuple values must exactly match the + // values in the ByteMatchTuple that you want to delete from the ByteMatchSet. + // + // ByteMatchTuple is a required field + ByteMatchTuple *ByteMatchTuple `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ByteMatchSetUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchSetUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
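+//
+// Illustrative sketch (not generated SDK documentation): an update that
+// inserts a tuple pairs a ChangeAction with a populated ByteMatchTuple,
+// whose fields are documented below:
+//
+// update := &waf.ByteMatchSetUpdate{
+//     Action:         aws.String("INSERT"),
+//     ByteMatchTuple: tuple, // an assumed, fully populated *waf.ByteMatchTuple
+// }
+// if err := update.Validate(); err != nil {
+//     // a missing Action or tuple is reported here
+// }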
+func (s *ByteMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ByteMatchSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.ByteMatchTuple == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchTuple")) + } + if s.ByteMatchTuple != nil { + if err := s.ByteMatchTuple.Validate(); err != nil { + invalidParams.AddNested("ByteMatchTuple", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *ByteMatchSetUpdate) SetAction(v string) *ByteMatchSetUpdate { + s.Action = &v + return s +} + +// SetByteMatchTuple sets the ByteMatchTuple field's value. +func (s *ByteMatchSetUpdate) SetByteMatchTuple(v *ByteMatchTuple) *ByteMatchSetUpdate { + s.ByteMatchTuple = v + return s +} + +// The bytes (typically a string that corresponds with ASCII characters) that +// you want AWS WAF to search for in web requests, the location in requests +// that you want AWS WAF to search, and other settings. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchTuple +type ByteMatchTuple struct { + _ struct{} `type:"structure"` + + // The part of a web request that you want AWS WAF to search, such as a specified + // header or a query string. For more information, see FieldToMatch. + // + // FieldToMatch is a required field + FieldToMatch *FieldToMatch `type:"structure" required:"true"` + + // Within the portion of a web request that you want to search (for example, + // in the query string, if any), specify where you want AWS WAF to search. Valid + // values include the following: + // + // CONTAINS + // + // The specified part of the web request must include the value of TargetString, + // but the location doesn't matter. + // + // CONTAINS_WORD + // + // The specified part of the web request must include the value of TargetString, + // and TargetString must contain only alphanumeric characters or underscore + // (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means + // one of the following: + // + // * TargetString exactly matches the value of the specified part of the + // web request, such as the value of a header. + // + // * TargetString is at the beginning of the specified part of the web request + // and is followed by a character other than an alphanumeric character or + // underscore (_), for example, BadBot;. + // + // * TargetString is at the end of the specified part of the web request + // and is preceded by a character other than an alphanumeric character or + // underscore (_), for example, ;BadBot. + // + // * TargetString is in the middle of the specified part of the web request + // and is preceded and followed by characters other than alphanumeric characters + // or underscore (_), for example, -BadBot;. + // + // EXACTLY + // + // The value of the specified part of the web request must exactly match the + // value of TargetString. + // + // STARTS_WITH + // + // The value of TargetString must appear at the beginning of the specified part + // of the web request. + // + // ENDS_WITH + // + // The value of TargetString must appear at the end of the specified part of + // the web request. + // + // PositionalConstraint is a required field + PositionalConstraint *string `type:"string" required:"true" enum:"PositionalConstraint"` + + // The value that you want AWS WAF to search for. 
AWS WAF searches for the specified + // string in the part of web requests that you specified in FieldToMatch. The + // maximum length of the value is 50 bytes. + // + // Valid values depend on the values that you specified for FieldToMatch: + // + // * HEADER: The value that you want AWS WAF to search for in the request + // header that you specified in FieldToMatch, for example, the value of the + // User-Agent or Referer header. + // + // * METHOD: The HTTP method, which indicates the type of operation specified + // in the request. CloudFront supports the following methods: DELETE, GET, + // HEAD, OPTIONS, PATCH, POST, and PUT. + // + // * QUERY_STRING: The value that you want AWS WAF to search for in the query + // string, which is the part of a URL that appears after a ? character. + // + // * URI: The value that you want AWS WAF to search for in the part of a + // URL that identifies a resource, for example, /images/daily-ad.jpg. + // + // * BODY: The part of a request that contains any additional data that you + // want to send to your web server as the HTTP request body, such as data + // from a form. The request body immediately follows the request headers. + // Note that only the first 8192 bytes of the request body are forwarded + // to AWS WAF for inspection. To allow or block requests based on the length + // of the body, you can create a size constraint set. For more information, + // see CreateSizeConstraintSet. + // + // If TargetString includes alphabetic characters A-Z and a-z, note that the + // value is case sensitive. + // + // If you're using the AWS WAF API + // + // Specify a base64-encoded version of the value. The maximum length of the + // value before you base64-encode it is 50 bytes. + // + // For example, suppose the value of Type is HEADER and the value of Data is + // User-Agent. If you want to search the User-Agent header for the value BadBot, + // you base64-encode BadBot using MIME base64 encoding and include the resulting + // value, QmFkQm90, in the value of TargetString. + // + // If you're using the AWS CLI or one of the AWS SDKs + // + // The value that you want AWS WAF to search for. The SDK automatically base64 + // encodes the value. + // + // TargetString is automatically base64 encoded/decoded by the SDK. + // + // TargetString is a required field + TargetString []byte `type:"blob" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on TargetString before inspecting a request + // for a match. 
+ // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system commandline + // command and using unusual formatting to disguise some or all of the command, + // use this option to perform the following transformations: + // + // * Delete the following characters: \ " ' ^ + // + // * Delete spaces before the following characters: / ( + // + // * Replace the following characters with a space: , ; + // + // * Replace multiple spaces with one space + // + // * Convert uppercase letters (A-Z) to lowercase (a-z) + // + // COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // * \f, formfeed, decimal 12 + // + // * \t, tab, decimal 9 + // + // * \n, newline, decimal 10 + // + // * \r, carriage return, decimal 13 + // + // * \v, vertical tab, decimal 11 + // + // * non-breaking space, decimal 160 + // + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. + // HTML_ENTITY_DECODE performs the following operations: + // + // * Replaces (ampersand)quot; with " + // + // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 + // + // * Replaces (ampersand)lt; with a "less than" symbol + // + // * Replaces (ampersand)gt; with > + // + // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, + // with the corresponding characters + // + // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, + // with the corresponding characters + // + // LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + // + // TextTransformation is a required field + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` +} + +// String returns the string representation +func (s ByteMatchTuple) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByteMatchTuple) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ByteMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ByteMatchTuple"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.PositionalConstraint == nil { + invalidParams.Add(request.NewErrParamRequired("PositionalConstraint")) + } + if s.TargetString == nil { + invalidParams.Add(request.NewErrParamRequired("TargetString")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFieldToMatch sets the FieldToMatch field's value. +func (s *ByteMatchTuple) SetFieldToMatch(v *FieldToMatch) *ByteMatchTuple { + s.FieldToMatch = v + return s +} + +// SetPositionalConstraint sets the PositionalConstraint field's value. 
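+//
+// For illustration only (not generated SDK documentation): a tuple that looks
+// for "BadBot" anywhere in the lowercased User-Agent header could be built as
+// follows; the SDK base64-encodes TargetString automatically:
+//
+// tuple := &waf.ByteMatchTuple{
+//     FieldToMatch:         &waf.FieldToMatch{Type: aws.String("HEADER"), Data: aws.String("User-Agent")},
+//     PositionalConstraint: aws.String("CONTAINS"),
+//     TargetString:         []byte("badbot"), // compared after the LOWERCASE transformation
+//     TextTransformation:   aws.String("LOWERCASE"),
+// }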
+func (s *ByteMatchTuple) SetPositionalConstraint(v string) *ByteMatchTuple { + s.PositionalConstraint = &v + return s +} + +// SetTargetString sets the TargetString field's value. +func (s *ByteMatchTuple) SetTargetString(v []byte) *ByteMatchTuple { + s.TargetString = v + return s +} + +// SetTextTransformation sets the TextTransformation field's value. +func (s *ByteMatchTuple) SetTextTransformation(v string) *ByteMatchTuple { + s.TextTransformation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateByteMatchSetRequest +type CreateByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the ByteMatchSet. You can't change Name + // after you create a ByteMatchSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateByteMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateByteMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateByteMatchSetInput) SetChangeToken(v string) *CreateByteMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateByteMatchSetInput) SetName(v string) *CreateByteMatchSetInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateByteMatchSetResponse +type CreateByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // A ByteMatchSet that contains no ByteMatchTuple objects. + ByteMatchSet *ByteMatchSet `type:"structure"` + + // The ChangeToken that you used to submit the CreateByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateByteMatchSetOutput) GoString() string { + return s.String() +} + +// SetByteMatchSet sets the ByteMatchSet field's value. +func (s *CreateByteMatchSetOutput) SetByteMatchSet(v *ByteMatchSet) *CreateByteMatchSetOutput { + s.ByteMatchSet = v + return s +} + +// SetChangeToken sets the ChangeToken field's value. 
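+//
+// A sketch of the change-token workflow described above (illustrative, not
+// generated SDK documentation); "svc" is an assumed *waf.WAF client:
+//
+// token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+// if err == nil {
+//     out, err := svc.CreateByteMatchSet(&waf.CreateByteMatchSetInput{
+//         ChangeToken: token.ChangeToken,
+//         Name:        aws.String("example-byte-match-set"), // hypothetical name
+//     })
+//     _, _ = out, err // out.ByteMatchSet starts with no ByteMatchTuple objects
+// }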
+func (s *CreateByteMatchSetOutput) SetChangeToken(v string) *CreateByteMatchSetOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateGeoMatchSetRequest +type CreateGeoMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the GeoMatchSet. You can't change Name + // after you create the GeoMatchSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateGeoMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGeoMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGeoMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGeoMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateGeoMatchSetInput) SetChangeToken(v string) *CreateGeoMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateGeoMatchSetInput) SetName(v string) *CreateGeoMatchSetInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateGeoMatchSetResponse +type CreateGeoMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateGeoMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // The GeoMatchSet returned in the CreateGeoMatchSet response. The GeoMatchSet + // contains no GeoMatchConstraints. + GeoMatchSet *GeoMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateGeoMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGeoMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateGeoMatchSetOutput) SetChangeToken(v string) *CreateGeoMatchSetOutput { + s.ChangeToken = &v + return s +} + +// SetGeoMatchSet sets the GeoMatchSet field's value. +func (s *CreateGeoMatchSetOutput) SetGeoMatchSet(v *GeoMatchSet) *CreateGeoMatchSetOutput { + s.GeoMatchSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateIPSetRequest +type CreateIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. 
You can't change Name after + // you create the IPSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateIPSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateIPSetInput) SetChangeToken(v string) *CreateIPSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateIPSetInput) SetName(v string) *CreateIPSetInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateIPSetResponse +type CreateIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // The IPSet returned in the CreateIPSet response. + IPSet *IPSet `type:"structure"` +} + +// String returns the string representation +func (s CreateIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIPSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateIPSetOutput) SetChangeToken(v string) *CreateIPSetOutput { + s.ChangeToken = &v + return s +} + +// SetIPSet sets the IPSet field's value. +func (s *CreateIPSetOutput) SetIPSet(v *IPSet) *CreateIPSetOutput { + s.IPSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRateBasedRuleRequest +type CreateRateBasedRuleInput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRateBasedRule request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description for the metrics for this RateBasedRule. The + // name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't + // contain whitespace. You can't change the name of the metric after you create + // the RateBasedRule. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the RateBasedRule. You can't change the + // name of a RateBasedRule after you create it. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The field that AWS WAF uses to determine if requests are likely arriving + // from a single source and thus subject to rate monitoring. The only valid + // value for RateKey is IP. IP indicates that requests that arrive from the + // same IP address are subject to the RateLimit that is specified in the RateBasedRule. + // + // RateKey is a required field + RateKey *string `type:"string" required:"true" enum:"RateKey"` + + // The maximum number of requests, which have an identical value in the field + // that is specified by RateKey, allowed in a five-minute period. If the number + // of requests exceeds the RateLimit and the other predicates specified in the + // rule are also met, AWS WAF triggers the action that is specified for this + // rule. + // + // RateLimit is a required field + RateLimit *int64 `min:"2000" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateRateBasedRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRateBasedRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRateBasedRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRateBasedRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RateKey == nil { + invalidParams.Add(request.NewErrParamRequired("RateKey")) + } + if s.RateLimit == nil { + invalidParams.Add(request.NewErrParamRequired("RateLimit")) + } + if s.RateLimit != nil && *s.RateLimit < 2000 { + invalidParams.Add(request.NewErrParamMinValue("RateLimit", 2000)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRateBasedRuleInput) SetChangeToken(v string) *CreateRateBasedRuleInput { + s.ChangeToken = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *CreateRateBasedRuleInput) SetMetricName(v string) *CreateRateBasedRuleInput { + s.MetricName = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateRateBasedRuleInput) SetName(v string) *CreateRateBasedRuleInput { + s.Name = &v + return s +} + +// SetRateKey sets the RateKey field's value. +func (s *CreateRateBasedRuleInput) SetRateKey(v string) *CreateRateBasedRuleInput { + s.RateKey = &v + return s +} + +// SetRateLimit sets the RateLimit field's value. +func (s *CreateRateBasedRuleInput) SetRateLimit(v int64) *CreateRateBasedRuleInput { + s.RateLimit = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRateBasedRuleResponse +type CreateRateBasedRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRateBasedRule request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. 
+ ChangeToken *string `min:"1" type:"string"` + + // The RateBasedRule that is returned in the CreateRateBasedRule response. + Rule *RateBasedRule `type:"structure"` +} + +// String returns the string representation +func (s CreateRateBasedRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRateBasedRuleOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRateBasedRuleOutput) SetChangeToken(v string) *CreateRateBasedRuleOutput { + s.ChangeToken = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *CreateRateBasedRuleOutput) SetRule(v *RateBasedRule) *CreateRateBasedRuleOutput { + s.Rule = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexMatchSetRequest +type CreateRegexMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the RegexMatchSet. You can't change Name + // after you create a RegexMatchSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRegexMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRegexMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRegexMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRegexMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRegexMatchSetInput) SetChangeToken(v string) *CreateRegexMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateRegexMatchSetInput) SetName(v string) *CreateRegexMatchSetInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexMatchSetResponse +type CreateRegexMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRegexMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // A RegexMatchSet that contains no RegexMatchTuple objects. + RegexMatchSet *RegexMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateRegexMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRegexMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. 
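+//
+// Illustrative sketch (not generated SDK documentation) of the rate-based
+// rule input documented earlier: RateLimit must be at least 2000 and IP is
+// the only valid RateKey; names shown are hypothetical:
+//
+// out, err := svc.CreateRateBasedRule(&waf.CreateRateBasedRuleInput{
+//     ChangeToken: token.ChangeToken,
+//     MetricName:  aws.String("ExampleRateRuleMetric"), // alphanumeric, no whitespace
+//     Name:        aws.String("example-rate-rule"),
+//     RateKey:     aws.String("IP"),
+//     RateLimit:   aws.Int64(2000),
+// })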
+func (s *CreateRegexMatchSetOutput) SetChangeToken(v string) *CreateRegexMatchSetOutput { + s.ChangeToken = &v + return s +} + +// SetRegexMatchSet sets the RegexMatchSet field's value. +func (s *CreateRegexMatchSetOutput) SetRegexMatchSet(v *RegexMatchSet) *CreateRegexMatchSetOutput { + s.RegexMatchSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexPatternSetRequest +type CreateRegexPatternSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the RegexPatternSet. You can't change Name + // after you create a RegexPatternSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRegexPatternSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRegexPatternSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRegexPatternSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRegexPatternSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRegexPatternSetInput) SetChangeToken(v string) *CreateRegexPatternSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateRegexPatternSetInput) SetName(v string) *CreateRegexPatternSetInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRegexPatternSetResponse +type CreateRegexPatternSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRegexPatternSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // A RegexPatternSet that contains no objects. + RegexPatternSet *RegexPatternSet `type:"structure"` +} + +// String returns the string representation +func (s CreateRegexPatternSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRegexPatternSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRegexPatternSetOutput) SetChangeToken(v string) *CreateRegexPatternSetOutput { + s.ChangeToken = &v + return s +} + +// SetRegexPatternSet sets the RegexPatternSet field's value. 
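+//
+// Illustrative sketch (not generated SDK documentation): as the ChangeToken
+// field documentation above notes, the token returned by any Create* call
+// can be polled with GetChangeTokenStatus:
+//
+// status, err := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
+//     ChangeToken: out.ChangeToken,
+// })
+// // status.ChangeTokenStatus is PROVISIONED, PENDING, or INSYNC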
+func (s *CreateRegexPatternSetOutput) SetRegexPatternSet(v *RegexPatternSet) *CreateRegexPatternSetOutput { + s.RegexPatternSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRuleRequest +type CreateRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description for the metrics for this Rule. The name can + // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain + // whitespace. You can't change the name of the metric after you create the + // Rule. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the Rule. You can't change the name of + // a Rule after you create it. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRuleInput) SetChangeToken(v string) *CreateRuleInput { + s.ChangeToken = &v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *CreateRuleInput) SetMetricName(v string) *CreateRuleInput { + s.MetricName = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateRuleInput) SetName(v string) *CreateRuleInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRuleResponse +type CreateRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // The Rule returned in the CreateRule response. + Rule *Rule `type:"structure"` +} + +// String returns the string representation +func (s CreateRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateRuleOutput) SetChangeToken(v string) *CreateRuleOutput { + s.ChangeToken = &v + return s +} + +// SetRule sets the Rule field's value. 
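+//
+// Illustrative sketch (not generated SDK documentation) of a CreateRule call
+// that satisfies the input constraints above; both names are hypothetical:
+//
+// out, err := svc.CreateRule(&waf.CreateRuleInput{
+//     ChangeToken: token.ChangeToken,
+//     MetricName:  aws.String("ExampleRuleMetric"), // alphanumeric, no whitespace
+//     Name:        aws.String("example-rule"),
+// })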
+func (s *CreateRuleOutput) SetRule(v *Rule) *CreateRuleOutput { + s.Rule = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSetRequest +type CreateSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the SizeConstraintSet. You can't change + // Name after you create a SizeConstraintSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSizeConstraintSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSizeConstraintSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateSizeConstraintSetInput) SetChangeToken(v string) *CreateSizeConstraintSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSizeConstraintSetInput) SetName(v string) *CreateSizeConstraintSetInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSetResponse +type CreateSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // A SizeConstraintSet that contains no SizeConstraint objects. + SizeConstraintSet *SizeConstraintSet `type:"structure"` +} + +// String returns the string representation +func (s CreateSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateSizeConstraintSetOutput) SetChangeToken(v string) *CreateSizeConstraintSetOutput { + s.ChangeToken = &v + return s +} + +// SetSizeConstraintSet sets the SizeConstraintSet field's value. +func (s *CreateSizeConstraintSetOutput) SetSizeConstraintSet(v *SizeConstraintSet) *CreateSizeConstraintSetOutput { + s.SizeConstraintSet = v + return s +} + +// A request to create a SqlInjectionMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSetRequest +type CreateSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. 
+ // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description for the SqlInjectionMatchSet that you're creating. + // You can't change Name after you create the SqlInjectionMatchSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSqlInjectionMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateSqlInjectionMatchSetInput) SetChangeToken(v string) *CreateSqlInjectionMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSqlInjectionMatchSetInput) SetName(v string) *CreateSqlInjectionMatchSetInput { + s.Name = &v + return s +} + +// The response to a CreateSqlInjectionMatchSet request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSetResponse +type CreateSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // A SqlInjectionMatchSet. + SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateSqlInjectionMatchSetOutput) SetChangeToken(v string) *CreateSqlInjectionMatchSetOutput { + s.ChangeToken = &v + return s +} + +// SetSqlInjectionMatchSet sets the SqlInjectionMatchSet field's value. +func (s *CreateSqlInjectionMatchSetOutput) SetSqlInjectionMatchSet(v *SqlInjectionMatchSet) *CreateSqlInjectionMatchSetOutput { + s.SqlInjectionMatchSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACLRequest +type CreateWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The action that you want AWS WAF to take when a request doesn't match the + // criteria specified in any of the Rule objects that are associated with the + // WebACL. 
+ // + // DefaultAction is a required field + DefaultAction *WafAction `type:"structure" required:"true"` + + // A friendly name or description for the metrics for this WebACL. The name + // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't + // contain whitespace. You can't change MetricName after you create the WebACL. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // A friendly name or description of the WebACL. You can't change Name after + // you create the WebACL. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWebACLInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.DefaultAction == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultAction")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.DefaultAction != nil { + if err := s.DefaultAction.Validate(); err != nil { + invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateWebACLInput) SetChangeToken(v string) *CreateWebACLInput { + s.ChangeToken = &v + return s +} + +// SetDefaultAction sets the DefaultAction field's value. +func (s *CreateWebACLInput) SetDefaultAction(v *WafAction) *CreateWebACLInput { + s.DefaultAction = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *CreateWebACLInput) SetMetricName(v string) *CreateWebACLInput { + s.MetricName = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateWebACLInput) SetName(v string) *CreateWebACLInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACLResponse +type CreateWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // The WebACL returned in the CreateWebACL response. + WebACL *WebACL `type:"structure"` +} + +// String returns the string representation +func (s CreateWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateWebACLOutput) SetChangeToken(v string) *CreateWebACLOutput { + s.ChangeToken = &v + return s +} + +// SetWebACL sets the WebACL field's value. 
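
Editor's note: CreateWebACLInput is the one creation input in this hunk with a nested required structure, and its Validate delegates to WafAction.Validate. A minimal sketch of wiring a default BLOCK action, reusing the imports and client from the first sketch; the ACL name and metric name are placeholders.

// createBlockAllACL creates a WebACL whose DefaultAction blocks any request
// that no attached Rule matches. changeToken comes from a prior GetChangeToken.
func createBlockAllACL(svc *waf.WAF, changeToken string) (*waf.WebACL, error) {
	input := (&waf.CreateWebACLInput{}).
		SetChangeToken(changeToken).
		SetName("my-web-acl").
		SetMetricName("MyWebACLMetric").
		SetDefaultAction(&waf.WafAction{
			Type: aws.String(waf.WafActionTypeBlock),
		})
	// Validate also runs the nested WafAction validation shown above.
	if err := input.Validate(); err != nil {
		return nil, err
	}
	out, err := svc.CreateWebACL(input)
	if err != nil {
		return nil, err
	}
	return out.WebACL, nil
}
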
+func (s *CreateWebACLOutput) SetWebACL(v *WebACL) *CreateWebACLOutput { + s.WebACL = v + return s +} + +// A request to create an XssMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSetRequest +type CreateXssMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // A friendly name or description for the XssMatchSet that you're creating. + // You can't change Name after you create the XssMatchSet. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateXssMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateXssMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateXssMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateXssMatchSetInput) SetChangeToken(v string) *CreateXssMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateXssMatchSetInput) SetName(v string) *CreateXssMatchSetInput { + s.Name = &v + return s +} + +// The response to a CreateXssMatchSet request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSetResponse +type CreateXssMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the CreateXssMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` + + // An XssMatchSet. + XssMatchSet *XssMatchSet `type:"structure"` +} + +// String returns the string representation +func (s CreateXssMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateXssMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *CreateXssMatchSetOutput) SetChangeToken(v string) *CreateXssMatchSetOutput { + s.ChangeToken = &v + return s +} + +// SetXssMatchSet sets the XssMatchSet field's value. +func (s *CreateXssMatchSetOutput) SetXssMatchSet(v *XssMatchSet) *CreateXssMatchSetOutput { + s.XssMatchSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSetRequest +type DeleteByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to delete. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. 
+ //
+ // ByteMatchSetId is a required field
+ ByteMatchSetId *string `min:"1" type:"string" required:"true"`
+
+ // The value returned by the most recent call to GetChangeToken.
+ //
+ // ChangeToken is a required field
+ ChangeToken *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteByteMatchSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteByteMatchSetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteByteMatchSetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteByteMatchSetInput"}
+ if s.ByteMatchSetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId"))
+ }
+ if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1))
+ }
+ if s.ChangeToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("ChangeToken"))
+ }
+ if s.ChangeToken != nil && len(*s.ChangeToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetByteMatchSetId sets the ByteMatchSetId field's value.
+func (s *DeleteByteMatchSetInput) SetByteMatchSetId(v string) *DeleteByteMatchSetInput {
+ s.ByteMatchSetId = &v
+ return s
+}
+
+// SetChangeToken sets the ChangeToken field's value.
+func (s *DeleteByteMatchSetInput) SetChangeToken(v string) *DeleteByteMatchSetInput {
+ s.ChangeToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSetResponse
+type DeleteByteMatchSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ChangeToken that you used to submit the DeleteByteMatchSet request. You
+ // can also use this value to query the status of the request. For more information,
+ // see GetChangeTokenStatus.
+ ChangeToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteByteMatchSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteByteMatchSetOutput) GoString() string {
+ return s.String()
+}
+
+// SetChangeToken sets the ChangeToken field's value.
+func (s *DeleteByteMatchSetOutput) SetChangeToken(v string) *DeleteByteMatchSetOutput {
+ s.ChangeToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteGeoMatchSetRequest
+type DeleteGeoMatchSetInput struct {
+ _ struct{} `type:"structure"`
+
+ // The value returned by the most recent call to GetChangeToken.
+ //
+ // ChangeToken is a required field
+ ChangeToken *string `min:"1" type:"string" required:"true"`
+
+ // The GeoMatchSetId of the GeoMatchSet that you want to delete. GeoMatchSetId
+ // is returned by CreateGeoMatchSet and by ListGeoMatchSets.
+ //
+ // GeoMatchSetId is a required field
+ GeoMatchSetId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteGeoMatchSetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGeoMatchSetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteGeoMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGeoMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.GeoMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("GeoMatchSetId")) + } + if s.GeoMatchSetId != nil && len(*s.GeoMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GeoMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteGeoMatchSetInput) SetChangeToken(v string) *DeleteGeoMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetGeoMatchSetId sets the GeoMatchSetId field's value. +func (s *DeleteGeoMatchSetInput) SetGeoMatchSetId(v string) *DeleteGeoMatchSetInput { + s.GeoMatchSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteGeoMatchSetResponse +type DeleteGeoMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteGeoMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteGeoMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGeoMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteGeoMatchSetOutput) SetChangeToken(v string) *DeleteGeoMatchSetOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSetRequest +type DeleteIPSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The IPSetId of the IPSet that you want to delete. IPSetId is returned by + // CreateIPSet and by ListIPSets. + // + // IPSetId is a required field + IPSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIPSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.IPSetId == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetId")) + } + if s.IPSetId != nil && len(*s.IPSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. 
+func (s *DeleteIPSetInput) SetChangeToken(v string) *DeleteIPSetInput { + s.ChangeToken = &v + return s +} + +// SetIPSetId sets the IPSetId field's value. +func (s *DeleteIPSetInput) SetIPSetId(v string) *DeleteIPSetInput { + s.IPSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSetResponse +type DeleteIPSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIPSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteIPSetOutput) SetChangeToken(v string) *DeleteIPSetOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRuleRequest +type DeleteRateBasedRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The RuleId of the RateBasedRule that you want to delete. RuleId is returned + // by CreateRateBasedRule and by ListRateBasedRules. + // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRateBasedRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRateBasedRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRateBasedRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRateBasedRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRateBasedRuleInput) SetChangeToken(v string) *DeleteRateBasedRuleInput { + s.ChangeToken = &v + return s +} + +// SetRuleId sets the RuleId field's value. +func (s *DeleteRateBasedRuleInput) SetRuleId(v string) *DeleteRateBasedRuleInput { + s.RuleId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRuleResponse +type DeleteRateBasedRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteRateBasedRule request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. 
+ ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteRateBasedRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRateBasedRuleOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRateBasedRuleOutput) SetChangeToken(v string) *DeleteRateBasedRuleOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexMatchSetRequest +type DeleteRegexMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The RegexMatchSetId of the RegexMatchSet that you want to delete. RegexMatchSetId + // is returned by CreateRegexMatchSet and by ListRegexMatchSets. + // + // RegexMatchSetId is a required field + RegexMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRegexMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRegexMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRegexMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRegexMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RegexMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexMatchSetId")) + } + if s.RegexMatchSetId != nil && len(*s.RegexMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRegexMatchSetInput) SetChangeToken(v string) *DeleteRegexMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetRegexMatchSetId sets the RegexMatchSetId field's value. +func (s *DeleteRegexMatchSetInput) SetRegexMatchSetId(v string) *DeleteRegexMatchSetInput { + s.RegexMatchSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexMatchSetResponse +type DeleteRegexMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteRegexMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteRegexMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRegexMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. 
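
Editor's note: the Delete*Input types all pair a fresh change token with the identifier returned at creation time. A sketch for DeleteRegexMatchSet, again assuming the client from the first sketch; note that the service typically rejects deleting a set that is still non-empty or referenced by a Rule, so callers usually empty it first.

// deleteRegexMatchSet pairs a freshly issued change token with the id that
// CreateRegexMatchSet returned. Tokens are single-use: fetch a new one for
// each mutating call rather than reusing an old token.
func deleteRegexMatchSet(svc *waf.WAF, id string) error {
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.DeleteRegexMatchSet(&waf.DeleteRegexMatchSetInput{
		ChangeToken:     token.ChangeToken,
		RegexMatchSetId: aws.String(id),
	})
	return err
}
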
+func (s *DeleteRegexMatchSetOutput) SetChangeToken(v string) *DeleteRegexMatchSetOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexPatternSetRequest +type DeleteRegexPatternSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The RegexPatternSetId of the RegexPatternSet that you want to delete. RegexPatternSetId + // is returned by CreateRegexPatternSet and by ListRegexPatternSets. + // + // RegexPatternSetId is a required field + RegexPatternSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRegexPatternSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRegexPatternSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRegexPatternSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRegexPatternSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RegexPatternSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexPatternSetId")) + } + if s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexPatternSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRegexPatternSetInput) SetChangeToken(v string) *DeleteRegexPatternSetInput { + s.ChangeToken = &v + return s +} + +// SetRegexPatternSetId sets the RegexPatternSetId field's value. +func (s *DeleteRegexPatternSetInput) SetRegexPatternSetId(v string) *DeleteRegexPatternSetInput { + s.RegexPatternSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRegexPatternSetResponse +type DeleteRegexPatternSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteRegexPatternSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteRegexPatternSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRegexPatternSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRegexPatternSetOutput) SetChangeToken(v string) *DeleteRegexPatternSetOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRuleRequest +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The RuleId of the Rule that you want to delete. RuleId is returned by CreateRule + // and by ListRules. 
+ // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRuleInput) SetChangeToken(v string) *DeleteRuleInput { + s.ChangeToken = &v + return s +} + +// SetRuleId sets the RuleId field's value. +func (s *DeleteRuleInput) SetRuleId(v string) *DeleteRuleInput { + s.RuleId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRuleResponse +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteRuleOutput) SetChangeToken(v string) *DeleteRuleOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSetRequest +type DeleteSizeConstraintSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The SizeConstraintSetId of the SizeConstraintSet that you want to delete. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + // + // SizeConstraintSetId is a required field + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSizeConstraintSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSizeConstraintSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSizeConstraintSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.SizeConstraintSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) + } + if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteSizeConstraintSetInput) SetChangeToken(v string) *DeleteSizeConstraintSetInput { + s.ChangeToken = &v + return s +} + +// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. +func (s *DeleteSizeConstraintSetInput) SetSizeConstraintSetId(v string) *DeleteSizeConstraintSetInput { + s.SizeConstraintSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSetResponse +type DeleteSizeConstraintSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSizeConstraintSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteSizeConstraintSetOutput) SetChangeToken(v string) *DeleteSizeConstraintSetOutput { + s.ChangeToken = &v + return s +} + +// A request to delete a SqlInjectionMatchSet from AWS WAF. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSetRequest +type DeleteSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to delete. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + // + // SqlInjectionMatchSetId is a required field + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSqlInjectionMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSqlInjectionMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.SqlInjectionMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) + } + if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteSqlInjectionMatchSetInput) SetChangeToken(v string) *DeleteSqlInjectionMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. +func (s *DeleteSqlInjectionMatchSetInput) SetSqlInjectionMatchSetId(v string) *DeleteSqlInjectionMatchSetInput { + s.SqlInjectionMatchSetId = &v + return s +} + +// The response to a request to delete a SqlInjectionMatchSet from AWS WAF. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSetResponse +type DeleteSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteSqlInjectionMatchSetOutput) SetChangeToken(v string) *DeleteSqlInjectionMatchSetOutput { + s.ChangeToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACLRequest +type DeleteWebACLInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The WebACLId of the WebACL that you want to delete. WebACLId is returned + // by CreateWebACL and by ListWebACLs. + // + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteWebACLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWebACLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWebACLInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteWebACLInput) SetChangeToken(v string) *DeleteWebACLInput { + s.ChangeToken = &v + return s +} + +// SetWebACLId sets the WebACLId field's value. +func (s *DeleteWebACLInput) SetWebACLId(v string) *DeleteWebACLInput { + s.WebACLId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACLResponse +type DeleteWebACLOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the DeleteWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteWebACLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWebACLOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *DeleteWebACLOutput) SetChangeToken(v string) *DeleteWebACLOutput { + s.ChangeToken = &v + return s +} + +// A request to delete an XssMatchSet from AWS WAF. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSetRequest +type DeleteXssMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // The XssMatchSetId of the XssMatchSet that you want to delete. XssMatchSetId + // is returned by CreateXssMatchSet and by ListXssMatchSets. + // + // XssMatchSetId is a required field + XssMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteXssMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteXssMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteXssMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.XssMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) + } + if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. 
+func (s *DeleteXssMatchSetInput) SetChangeToken(v string) *DeleteXssMatchSetInput {
+ s.ChangeToken = &v
+ return s
+}
+
+// SetXssMatchSetId sets the XssMatchSetId field's value.
+func (s *DeleteXssMatchSetInput) SetXssMatchSetId(v string) *DeleteXssMatchSetInput {
+ s.XssMatchSetId = &v
+ return s
+}
+
+// The response to a request to delete an XssMatchSet from AWS WAF.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSetResponse
+type DeleteXssMatchSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ChangeToken that you used to submit the DeleteXssMatchSet request. You
+ // can also use this value to query the status of the request. For more information,
+ // see GetChangeTokenStatus.
+ ChangeToken *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteXssMatchSetOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteXssMatchSetOutput) GoString() string {
+ return s.String()
+}
+
+// SetChangeToken sets the ChangeToken field's value.
+func (s *DeleteXssMatchSetOutput) SetChangeToken(v string) *DeleteXssMatchSetOutput {
+ s.ChangeToken = &v
+ return s
+}
+
+// Specifies where in a web request to look for TargetString.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/FieldToMatch
+type FieldToMatch struct {
+ _ struct{} `type:"structure"`
+
+ // When the value of Type is HEADER, enter the name of the header that you want
+ // AWS WAF to search, for example, User-Agent or Referer. If the value of Type
+ // is any other value, omit Data.
+ //
+ // The name of the header is not case sensitive.
+ Data *string `type:"string"`
+
+ // The part of the web request that you want AWS WAF to search for a specified
+ // string. Parts of a request that you can search include the following:
+ //
+ // * HEADER: A specified request header, for example, the value of the User-Agent
+ // or Referer header. If you choose HEADER for the type, specify the name
+ // of the header in Data.
+ //
+ // * METHOD: The HTTP method, which indicates the type of operation that
+ // the request is asking the origin to perform. Amazon CloudFront supports
+ // the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT.
+ //
+ // * QUERY_STRING: A query string, which is the part of a URL that appears
+ // after a ? character, if any.
+ //
+ // * URI: The part of a web request that identifies a resource, for example,
+ // /images/daily-ad.jpg.
+ //
+ // * BODY: The part of a request that contains any additional data that you
+ // want to send to your web server as the HTTP request body, such as data
+ // from a form. The request body immediately follows the request headers.
+ // Note that only the first 8192 bytes of the request body are forwarded
+ // to AWS WAF for inspection. To allow or block requests based on the length
+ // of the body, you can create a size constraint set. For more information,
+ // see CreateSizeConstraintSet.
+ //
+ // Type is a required field
+ Type *string `type:"string" required:"true" enum:"MatchFieldType"`
+}
+
+// String returns the string representation
+func (s FieldToMatch) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FieldToMatch) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
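
Editor's note: the Data/Type contract described above is easy to get wrong, so a small sketch: Data is only consulted when Type is HEADER and is omitted otherwise. The MatchFieldType constants are the SDK's generated enum values; imports and client are as in the first sketch.

// exampleFieldsToMatch shows the Data/Type contract: Data names a header and
// is only meaningful when Type is HEADER; for every other Type it is omitted.
func exampleFieldsToMatch() []*waf.FieldToMatch {
	userAgent := &waf.FieldToMatch{
		Type: aws.String(waf.MatchFieldTypeHeader),
		Data: aws.String("User-Agent"), // header name, case-insensitive
	}
	method := &waf.FieldToMatch{
		Type: aws.String(waf.MatchFieldTypeMethod), // no Data for METHOD
	}
	return []*waf.FieldToMatch{userAgent, method}
}
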
+func (s *FieldToMatch) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "FieldToMatch"}
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetData sets the Data field's value.
+func (s *FieldToMatch) SetData(v string) *FieldToMatch {
+ s.Data = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *FieldToMatch) SetType(v string) *FieldToMatch {
+ s.Type = &v
+ return s
+}
+
+// The country from which web requests originate that you want AWS WAF to search
+// for.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GeoMatchConstraint
+type GeoMatchConstraint struct {
+ _ struct{} `type:"structure"`
+
+ // The type of geographical area you want AWS WAF to search for. Currently Country
+ // is the only valid value.
+ //
+ // Type is a required field
+ Type *string `type:"string" required:"true" enum:"GeoMatchConstraintType"`
+
+ // The country that you want AWS WAF to search for.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true" enum:"GeoMatchConstraintValue"`
+}
+
+// String returns the string representation
+func (s GeoMatchConstraint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GeoMatchConstraint) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GeoMatchConstraint) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GeoMatchConstraint"}
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetType sets the Type field's value.
+func (s *GeoMatchConstraint) SetType(v string) *GeoMatchConstraint {
+ s.Type = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *GeoMatchConstraint) SetValue(v string) *GeoMatchConstraint {
+ s.Value = &v
+ return s
+}
+
+// Contains one or more countries that AWS WAF will search for.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GeoMatchSet
+type GeoMatchSet struct {
+ _ struct{} `type:"structure"`
+
+ // An array of GeoMatchConstraint objects, which contain the country that you
+ // want AWS WAF to search for.
+ //
+ // GeoMatchConstraints is a required field
+ GeoMatchConstraints []*GeoMatchConstraint `type:"list" required:"true"`
+
+ // The GeoMatchSetId for a GeoMatchSet. You use GeoMatchSetId to get information
+ // about a GeoMatchSet (see GeoMatchSet), update a GeoMatchSet (see UpdateGeoMatchSet),
+ // insert a GeoMatchSet into a Rule or delete one from a Rule (see UpdateRule),
+ // and delete a GeoMatchSet from AWS WAF (see DeleteGeoMatchSet).
+ //
+ // GeoMatchSetId is returned by CreateGeoMatchSet and by ListGeoMatchSets.
+ //
+ // GeoMatchSetId is a required field
+ GeoMatchSetId *string `min:"1" type:"string" required:"true"`
+
+ // A friendly name or description of the GeoMatchSet. You can't change the name
+ // of a GeoMatchSet after you create it.
+ Name *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GeoMatchSet) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GeoMatchSet) GoString() string {
+ return s.String()
+}
+
+// SetGeoMatchConstraints sets the GeoMatchConstraints field's value.
+func (s *GeoMatchSet) SetGeoMatchConstraints(v []*GeoMatchConstraint) *GeoMatchSet {
+ s.GeoMatchConstraints = v
+ return s
+}
+
+// SetGeoMatchSetId sets the GeoMatchSetId field's value.
+func (s *GeoMatchSet) SetGeoMatchSetId(v string) *GeoMatchSet {
+ s.GeoMatchSetId = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GeoMatchSet) SetName(v string) *GeoMatchSet {
+ s.Name = &v
+ return s
+}
+
+// Contains the identifier and the name of the GeoMatchSet.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GeoMatchSetSummary
+type GeoMatchSetSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The GeoMatchSetId for a GeoMatchSet. You can use GeoMatchSetId in a GetGeoMatchSet
+ // request to get detailed information about a GeoMatchSet.
+ //
+ // GeoMatchSetId is a required field
+ GeoMatchSetId *string `min:"1" type:"string" required:"true"`
+
+ // A friendly name or description of the GeoMatchSet. You can't change the name
+ // of a GeoMatchSet after you create it.
+ //
+ // Name is a required field
+ Name *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GeoMatchSetSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GeoMatchSetSummary) GoString() string {
+ return s.String()
+}
+
+// SetGeoMatchSetId sets the GeoMatchSetId field's value.
+func (s *GeoMatchSetSummary) SetGeoMatchSetId(v string) *GeoMatchSetSummary {
+ s.GeoMatchSetId = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GeoMatchSetSummary) SetName(v string) *GeoMatchSetSummary {
+ s.Name = &v
+ return s
+}
+
+// Specifies the type of update to perform to a GeoMatchSet with UpdateGeoMatchSet.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GeoMatchSetUpdate
+type GeoMatchSetUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether to insert or delete a country with UpdateGeoMatchSet.
+ //
+ // Action is a required field
+ Action *string `type:"string" required:"true" enum:"ChangeAction"`
- // Specifies the order in which the Rules in a WebACL are evaluated. Rules with
- // a lower value for Priority are evaluated before Rules with a higher value.
- // The value must be a unique integer. If you add multiple Rules to a WebACL,
- // the values don't need to be consecutive.
+ // The country from which web requests originate that you want AWS WAF to search
+ // for.
 //
- // Priority is a required field
- Priority *int64 `type:"integer" required:"true"`
+ // GeoMatchConstraint is a required field
+ GeoMatchConstraint *GeoMatchConstraint `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s GeoMatchSetUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GeoMatchSetUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
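
Editor's note: a GeoMatchSetUpdate is only ever sent inside an UpdateGeoMatchSet call; the UpdateGeoMatchSetInput type sits elsewhere in this vendored file. A sketch of inserting a single country constraint, assuming the earlier imports and client; the enum constant names follow the SDK's usual generation scheme and the IDs are placeholders.

// addCountry inserts one country constraint into an existing GeoMatchSet.
// setID and changeToken are placeholders supplied by the caller.
func addCountry(svc *waf.WAF, setID, changeToken string) error {
	_, err := svc.UpdateGeoMatchSet(&waf.UpdateGeoMatchSetInput{
		ChangeToken:   aws.String(changeToken),
		GeoMatchSetId: aws.String(setID),
		Updates: []*waf.GeoMatchSetUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			GeoMatchConstraint: &waf.GeoMatchConstraint{
				Type:  aws.String(waf.GeoMatchConstraintTypeCountry), // the only valid Type
				Value: aws.String(waf.GeoMatchConstraintValueUs),
			},
		}},
	})
	return err
}
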
+func (s *GeoMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoMatchSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.GeoMatchConstraint == nil { + invalidParams.Add(request.NewErrParamRequired("GeoMatchConstraint")) + } + if s.GeoMatchConstraint != nil { + if err := s.GeoMatchConstraint.Validate(); err != nil { + invalidParams.AddNested("GeoMatchConstraint", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *GeoMatchSetUpdate) SetAction(v string) *GeoMatchSetUpdate { + s.Action = &v + return s +} + +// SetGeoMatchConstraint sets the GeoMatchConstraint field's value. +func (s *GeoMatchSetUpdate) SetGeoMatchConstraint(v *GeoMatchConstraint) *GeoMatchSetUpdate { + s.GeoMatchConstraint = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSetRequest +type GetByteMatchSetInput struct { + _ struct{} `type:"structure"` + + // The ByteMatchSetId of the ByteMatchSet that you want to get. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + // + // ByteMatchSetId is a required field + ByteMatchSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetByteMatchSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetByteMatchSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetByteMatchSetInput"} + if s.ByteMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) + } + if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetByteMatchSetId sets the ByteMatchSetId field's value. +func (s *GetByteMatchSetInput) SetByteMatchSetId(v string) *GetByteMatchSetInput { + s.ByteMatchSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSetResponse +type GetByteMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the ByteMatchSet that you specified in the GetByteMatchSet + // request. For more information, see the following topics: + // + // * ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name + // + // * ByteMatchTuples: Contains an array of ByteMatchTuple objects. Each ByteMatchTuple + // object contains FieldToMatch, PositionalConstraint, TargetString, and + // TextTransformation + // + // * FieldToMatch: Contains Data and Type + ByteMatchSet *ByteMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetByteMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetByteMatchSetOutput) GoString() string { + return s.String() +} + +// SetByteMatchSet sets the ByteMatchSet field's value. 
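
Editor's note: unlike the Create and Delete calls above, the Get* calls are reads and take no change token. A sketch that fetches a ByteMatchSet and walks its tuples, assuming fmt is imported alongside the earlier imports.

// describeByteMatchSet is a read: no change token is involved.
func describeByteMatchSet(svc *waf.WAF, id string) error {
	out, err := svc.GetByteMatchSet(&waf.GetByteMatchSetInput{
		ByteMatchSetId: aws.String(id),
	})
	if err != nil {
		return err
	}
	for _, t := range out.ByteMatchSet.ByteMatchTuples {
		// TargetString is a raw []byte blob in the generated types.
		fmt.Printf("%s matches %q\n",
			aws.StringValue(t.FieldToMatch.Type), string(t.TargetString))
	}
	return nil
}
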
+func (s *GetByteMatchSetOutput) SetByteMatchSet(v *ByteMatchSet) *GetByteMatchSetOutput { + s.ByteMatchSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenRequest +type GetChangeTokenInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetChangeTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenInput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenResponse +type GetChangeTokenOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus + // request to get the current status of the request. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetChangeTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetChangeTokenOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *GetChangeTokenOutput) SetChangeToken(v string) *GetChangeTokenOutput { + s.ChangeToken = &v + return s +} - // The RuleId for a Rule. You use RuleId to get more information about a Rule - // (see GetRule), update a Rule (see UpdateRule), insert a Rule into a WebACL - // or delete a one from a WebACL (see UpdateWebACL), or delete a Rule from AWS - // WAF (see DeleteRule). - // - // RuleId is returned by CreateRule and by ListRules. - // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatusRequest +type GetChangeTokenStatusInput struct { + _ struct{} `type:"structure"` - // The rule type, either REGULAR, as defined by Rule, or RATE_BASED, as defined - // by RateBasedRule. The default is REGULAR. Although this field is optional, - // be aware that if you try to add a RATE_BASED rule to a web ACL without setting - // the type, the UpdateWebACL request will fail because the request tries to - // add a REGULAR rule with the specified ID, which does not exist. - Type *string `type:"string" enum:"WafRuleType"` + // The change token for which you want to get the status. This change token + // was previously returned in the GetChangeToken response. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ActivatedRule) String() string { +func (s GetChangeTokenStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ActivatedRule) GoString() string { +func (s GetChangeTokenStatusInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
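
Editor's note: GetChangeToken and GetChangeTokenStatus bracket every mutation; the status moves from PROVISIONED through PENDING to INSYNC as the change propagates. A polling sketch, assuming the generated ChangeTokenStatus constants and a time import in addition to the earlier ones.

// waitInSync polls GetChangeTokenStatus until the submitted change has
// propagated. Statuses move PROVISIONED -> PENDING -> INSYNC.
func waitInSync(svc *waf.WAF, token *string) error {
	for {
		out, err := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
			ChangeToken: token,
		})
		if err != nil {
			return err
		}
		if aws.StringValue(out.ChangeTokenStatus) == waf.ChangeTokenStatusInsync {
			return nil
		}
		time.Sleep(2 * time.Second) // crude fixed backoff; tune for real use
	}
}
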
-func (s *ActivatedRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ActivatedRule"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) - } - if s.Priority == nil { - invalidParams.Add(request.NewErrParamRequired("Priority")) - } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) +func (s *GetChangeTokenStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetChangeTokenStatusInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) } - if s.Action != nil { - if err := s.Action.Validate(); err != nil { - invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) - } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) } if invalidParams.Len() > 0 { @@ -5750,180 +10530,256 @@ func (s *ActivatedRule) Validate() error { return nil } -// SetAction sets the Action field's value. -func (s *ActivatedRule) SetAction(v *WafAction) *ActivatedRule { - s.Action = v +// SetChangeToken sets the ChangeToken field's value. +func (s *GetChangeTokenStatusInput) SetChangeToken(v string) *GetChangeTokenStatusInput { + s.ChangeToken = &v return s } -// SetPriority sets the Priority field's value. -func (s *ActivatedRule) SetPriority(v int64) *ActivatedRule { - s.Priority = &v - return s +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatusResponse +type GetChangeTokenStatusOutput struct { + _ struct{} `type:"structure"` + + // The status of the change token. + ChangeTokenStatus *string `type:"string" enum:"ChangeTokenStatus"` } -// SetRuleId sets the RuleId field's value. -func (s *ActivatedRule) SetRuleId(v string) *ActivatedRule { - s.RuleId = &v - return s +// String returns the string representation +func (s GetChangeTokenStatusOutput) String() string { + return awsutil.Prettify(s) } -// SetType sets the Type field's value. -func (s *ActivatedRule) SetType(v string) *ActivatedRule { - s.Type = &v +// GoString returns the string representation +func (s GetChangeTokenStatusOutput) GoString() string { + return s.String() +} + +// SetChangeTokenStatus sets the ChangeTokenStatus field's value. +func (s *GetChangeTokenStatusOutput) SetChangeTokenStatus(v string) *GetChangeTokenStatusOutput { + s.ChangeTokenStatus = &v return s } -// In a GetByteMatchSet request, ByteMatchSet is a complex type that contains -// the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified -// when you updated the ByteMatchSet. -// -// A complex type that contains ByteMatchTuple objects, which specify the parts -// of web requests that you want AWS WAF to inspect and the values that you -// want AWS WAF to search for. If a ByteMatchSet contains more than one ByteMatchTuple -// object, a request needs to match the settings in only one ByteMatchTuple -// to be considered a match. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchSet -type ByteMatchSet struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetGeoMatchSetRequest +type GetGeoMatchSetInput struct { _ struct{} `type:"structure"` - // The ByteMatchSetId for a ByteMatchSet. 
You use ByteMatchSetId to get information
- // about a ByteMatchSet (see GetByteMatchSet), update a ByteMatchSet (see UpdateByteMatchSet),
- // insert a ByteMatchSet into a Rule or delete one from a Rule (see UpdateRule),
- // and delete a ByteMatchSet from AWS WAF (see DeleteByteMatchSet).
- //
- // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets.
+ // The GeoMatchSetId of the GeoMatchSet that you want to get. GeoMatchSetId
+ // is returned by CreateGeoMatchSet and by ListGeoMatchSets.
 //
- // ByteMatchSetId is a required field
- ByteMatchSetId *string `min:"1" type:"string" required:"true"`
+ // GeoMatchSetId is a required field
+ GeoMatchSetId *string `min:"1" type:"string" required:"true"`
+}
- // Specifies the bytes (typically a string that corresponds with ASCII characters)
- // that you want AWS WAF to search for in web requests, the location in requests
- // that you want AWS WAF to search, and other settings.
- //
- // ByteMatchTuples is a required field
- ByteMatchTuples []*ByteMatchTuple `type:"list" required:"true"`
+// String returns the string representation
+func (s GetGeoMatchSetInput) String() string {
+ return awsutil.Prettify(s)
+}
- // A friendly name or description of the ByteMatchSet. You can't change Name
- // after you create a ByteMatchSet.
- Name *string `min:"1" type:"string"`
+// GoString returns the string representation
+func (s GetGeoMatchSetInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetGeoMatchSetInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetGeoMatchSetInput"}
+ if s.GeoMatchSetId == nil {
+ invalidParams.Add(request.NewErrParamRequired("GeoMatchSetId"))
+ }
+ if s.GeoMatchSetId != nil && len(*s.GeoMatchSetId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("GeoMatchSetId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGeoMatchSetId sets the GeoMatchSetId field's value.
+func (s *GetGeoMatchSetInput) SetGeoMatchSetId(v string) *GetGeoMatchSetInput {
+ s.GeoMatchSetId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetGeoMatchSetResponse
+type GetGeoMatchSetOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the GeoMatchSet that you specified in the GetGeoMatchSet
+ // request. This includes the Type, which for a GeoMatchConstraint is always
+ // Country, as well as the Value, which is the identifier for a specific country.
+ GeoMatchSet *GeoMatchSet `type:"structure"`
}

// String returns the string representation
-func (s ByteMatchSet) String() string {
+func (s GetGeoMatchSetOutput) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s ByteMatchSet) GoString() string {
+func (s GetGeoMatchSetOutput) GoString() string {
 return s.String()
}

-// SetByteMatchSetId sets the ByteMatchSetId field's value.
-func (s *ByteMatchSet) SetByteMatchSetId(v string) *ByteMatchSet {
- s.ByteMatchSetId = &v
+// SetGeoMatchSet sets the GeoMatchSet field's value.
+func (s *GetGeoMatchSetOutput) SetGeoMatchSet(v *GeoMatchSet) *GetGeoMatchSetOutput {
+ s.GeoMatchSet = v
 return s
}

-// SetByteMatchTuples sets the ByteMatchTuples field's value.
-func (s *ByteMatchSet) SetByteMatchTuples(v []*ByteMatchTuple) *ByteMatchSet { - s.ByteMatchTuples = v - return s +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSetRequest +type GetIPSetInput struct { + _ struct{} `type:"structure"` + + // The IPSetId of the IPSet that you want to get. IPSetId is returned by CreateIPSet + // and by ListIPSets. + // + // IPSetId is a required field + IPSetId *string `min:"1" type:"string" required:"true"` } -// SetName sets the Name field's value. -func (s *ByteMatchSet) SetName(v string) *ByteMatchSet { - s.Name = &v +// String returns the string representation +func (s GetIPSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIPSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIPSetInput"} + if s.IPSetId == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetId")) + } + if s.IPSetId != nil && len(*s.IPSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIPSetId sets the IPSetId field's value. +func (s *GetIPSetInput) SetIPSetId(v string) *GetIPSetInput { + s.IPSetId = &v return s } -// Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the -// Name and ByteMatchSetId for one ByteMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchSetSummary -type ByteMatchSetSummary struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSetResponse +type GetIPSetOutput struct { _ struct{} `type:"structure"` - // The ByteMatchSetId for a ByteMatchSet. You use ByteMatchSetId to get information - // about a ByteMatchSet, update a ByteMatchSet, remove a ByteMatchSet from a - // Rule, and delete a ByteMatchSet from AWS WAF. + // Information about the IPSet that you specified in the GetIPSet request. For + // more information, see the following topics: // - // ByteMatchSetId is returned by CreateByteMatchSet and by ListByteMatchSets. + // * IPSet: Contains IPSetDescriptors, IPSetId, and Name // - // ByteMatchSetId is a required field - ByteMatchSetId *string `min:"1" type:"string" required:"true"` + // * IPSetDescriptors: Contains an array of IPSetDescriptor objects. Each + // IPSetDescriptor object contains Type and Value + IPSet *IPSet `type:"structure"` +} - // A friendly name or description of the ByteMatchSet. You can't change Name - // after you create a ByteMatchSet. +// String returns the string representation +func (s GetIPSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIPSetOutput) GoString() string { + return s.String() +} + +// SetIPSet sets the IPSet field's value. +func (s *GetIPSetOutput) SetIPSet(v *IPSet) *GetIPSetOutput { + s.IPSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleRequest +type GetRateBasedRuleInput struct { + _ struct{} `type:"structure"` + + // The RuleId of the RateBasedRule that you want to get. RuleId is returned + // by CreateRateBasedRule and by ListRateBasedRules. 
// - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ByteMatchSetSummary) String() string { +func (s GetRateBasedRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ByteMatchSetSummary) GoString() string { +func (s GetRateBasedRuleInput) GoString() string { return s.String() } -// SetByteMatchSetId sets the ByteMatchSetId field's value. -func (s *ByteMatchSetSummary) SetByteMatchSetId(v string) *ByteMatchSetSummary { - s.ByteMatchSetId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRateBasedRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRateBasedRuleInput"} + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetName sets the Name field's value. -func (s *ByteMatchSetSummary) SetName(v string) *ByteMatchSetSummary { - s.Name = &v +// SetRuleId sets the RuleId field's value. +func (s *GetRateBasedRuleInput) SetRuleId(v string) *GetRateBasedRuleInput { + s.RuleId = &v return s } -// In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to -// insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchSetUpdate -type ByteMatchSetUpdate struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeysRequest +type GetRateBasedRuleManagedKeysInput struct { _ struct{} `type:"structure"` - // Specifies whether to insert or delete a ByteMatchTuple. - // - // Action is a required field - Action *string `type:"string" required:"true" enum:"ChangeAction"` + // A null value and not currently used. Do not include this in your request. + NextMarker *string `min:"1" type:"string"` - // Information about the part of a web request that you want AWS WAF to inspect - // and the value that you want AWS WAF to search for. If you specify DELETE - // for the value of Action, the ByteMatchTuple values must exactly match the - // values in the ByteMatchTuple that you want to delete from the ByteMatchSet. + // The RuleId of the RateBasedRule for which you want to get a list of ManagedKeys. + // RuleId is returned by CreateRateBasedRule and by ListRateBasedRules. // - // ByteMatchTuple is a required field - ByteMatchTuple *ByteMatchTuple `type:"structure" required:"true"` + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ByteMatchSetUpdate) String() string { +func (s GetRateBasedRuleManagedKeysInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ByteMatchSetUpdate) GoString() string { +func (s GetRateBasedRuleManagedKeysInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ByteMatchSetUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ByteMatchSetUpdate"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) +func (s *GetRateBasedRuleManagedKeysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRateBasedRuleManagedKeysInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } - if s.ByteMatchTuple == nil { - invalidParams.Add(request.NewErrParamRequired("ByteMatchTuple")) + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) } - if s.ByteMatchTuple != nil { - if err := s.ByteMatchTuple.Validate(); err != nil { - invalidParams.AddNested("ByteMatchTuple", err.(request.ErrInvalidParams)) - } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) } if invalidParams.Len() > 0 { @@ -5932,235 +10788,173 @@ func (s *ByteMatchSetUpdate) Validate() error { return nil } -// SetAction sets the Action field's value. -func (s *ByteMatchSetUpdate) SetAction(v string) *ByteMatchSetUpdate { - s.Action = &v +// SetNextMarker sets the NextMarker field's value. +func (s *GetRateBasedRuleManagedKeysInput) SetNextMarker(v string) *GetRateBasedRuleManagedKeysInput { + s.NextMarker = &v return s } -// SetByteMatchTuple sets the ByteMatchTuple field's value. -func (s *ByteMatchSetUpdate) SetByteMatchTuple(v *ByteMatchTuple) *ByteMatchSetUpdate { - s.ByteMatchTuple = v +// SetRuleId sets the RuleId field's value. +func (s *GetRateBasedRuleManagedKeysInput) SetRuleId(v string) *GetRateBasedRuleManagedKeysInput { + s.RuleId = &v return s } -// The bytes (typically a string that corresponds with ASCII characters) that -// you want AWS WAF to search for in web requests, the location in requests -// that you want AWS WAF to search, and other settings. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ByteMatchTuple -type ByteMatchTuple struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeysResponse +type GetRateBasedRuleManagedKeysOutput struct { _ struct{} `type:"structure"` - // The part of a web request that you want AWS WAF to search, such as a specified - // header or a query string. For more information, see FieldToMatch. - // - // FieldToMatch is a required field - FieldToMatch *FieldToMatch `type:"structure" required:"true"` + // An array of IP addresses that currently are blocked by the specified RateBasedRule. + ManagedKeys []*string `type:"list"` - // Within the portion of a web request that you want to search (for example, - // in the query string, if any), specify where you want AWS WAF to search. Valid - // values include the following: - // - // CONTAINS - // - // The specified part of the web request must include the value of TargetString, - // but the location doesn't matter. - // - // CONTAINS_WORD - // - // The specified part of the web request must include the value of TargetString, - // and TargetString must contain only alphanumeric characters or underscore - // (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, which means - // one of the following: - // - // * TargetString exactly matches the value of the specified part of the - // web request, such as the value of a header. 
- // - // * TargetString is at the beginning of the specified part of the web request - // and is followed by a character other than an alphanumeric character or - // underscore (_), for example, BadBot;. - // - // * TargetString is at the end of the specified part of the web request - // and is preceded by a character other than an alphanumeric character or - // underscore (_), for example, ;BadBot. - // - // * TargetString is in the middle of the specified part of the web request - // and is preceded and followed by characters other than alphanumeric characters - // or underscore (_), for example, -BadBot;. - // - // EXACTLY - // - // The value of the specified part of the web request must exactly match the - // value of TargetString. - // - // STARTS_WITH - // - // The value of TargetString must appear at the beginning of the specified part - // of the web request. - // - // ENDS_WITH - // - // The value of TargetString must appear at the end of the specified part of - // the web request. - // - // PositionalConstraint is a required field - PositionalConstraint *string `type:"string" required:"true" enum:"PositionalConstraint"` + // A null value and not currently used. + NextMarker *string `min:"1" type:"string"` +} - // The value that you want AWS WAF to search for. AWS WAF searches for the specified - // string in the part of web requests that you specified in FieldToMatch. The - // maximum length of the value is 50 bytes. - // - // Valid values depend on the values that you specified for FieldToMatch: - // - // * HEADER: The value that you want AWS WAF to search for in the request - // header that you specified in FieldToMatch, for example, the value of the - // User-Agent or Referer header. - // - // * METHOD: The HTTP method, which indicates the type of operation specified - // in the request. CloudFront supports the following methods: DELETE, GET, - // HEAD, OPTIONS, PATCH, POST, and PUT. - // - // * QUERY_STRING: The value that you want AWS WAF to search for in the query - // string, which is the part of a URL that appears after a ? character. - // - // * URI: The value that you want AWS WAF to search for in the part of a - // URL that identifies a resource, for example, /images/daily-ad.jpg. - // - // * BODY: The part of a request that contains any additional data that you - // want to send to your web server as the HTTP request body, such as data - // from a form. The request body immediately follows the request headers. - // Note that only the first 8192 bytes of the request body are forwarded - // to AWS WAF for inspection. To allow or block requests based on the length - // of the body, you can create a size constraint set. For more information, - // see CreateSizeConstraintSet. - // - // If TargetString includes alphabetic characters A-Z and a-z, note that the - // value is case sensitive. - // - // If you're using the AWS WAF API - // - // Specify a base64-encoded version of the value. The maximum length of the - // value before you base64-encode it is 50 bytes. - // - // For example, suppose the value of Type is HEADER and the value of Data is - // User-Agent. If you want to search the User-Agent header for the value BadBot, - // you base64-encode BadBot using MIME base64 encoding and include the resulting - // value, QmFkQm90, in the value of TargetString. - // - // If you're using the AWS CLI or one of the AWS SDKs - // - // The value that you want AWS WAF to search for. The SDK automatically base64 - // encodes the value. 
- // - // TargetString is automatically base64 encoded/decoded by the SDK. - // - // TargetString is a required field - TargetString []byte `type:"blob" required:"true"` +// String returns the string representation +func (s GetRateBasedRuleManagedKeysOutput) String() string { + return awsutil.Prettify(s) +} - // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, - // AWS WAF performs the transformation on TargetString before inspecting a request - // for a match. - // - // CMD_LINE - // - // When you're concerned that attackers are injecting an operating system commandline - // command and using unusual formatting to disguise some or all of the command, - // use this option to perform the following transformations: - // - // * Delete the following characters: \ " ' ^ - // - // * Delete spaces before the following characters: / ( - // - // * Replace the following characters with a space: , ; - // - // * Replace multiple spaces with one space - // - // * Convert uppercase letters (A-Z) to lowercase (a-z) - // - // COMPRESS_WHITE_SPACE - // - // Use this option to replace the following characters with a space character - // (decimal 32): - // - // * \f, formfeed, decimal 12 - // - // * \t, tab, decimal 9 - // - // * \n, newline, decimal 10 - // - // * \r, carriage return, decimal 13 - // - // * \v, vertical tab, decimal 11 - // - // * non-breaking space, decimal 160 - // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. - // - // HTML_ENTITY_DECODE - // - // Use this option to replace HTML-encoded characters with unencoded characters. - // HTML_ENTITY_DECODE performs the following operations: - // - // * Replaces (ampersand)quot; with " - // - // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 - // - // * Replaces (ampersand)lt; with a "less than" symbol - // - // * Replaces (ampersand)gt; with > - // - // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, - // with the corresponding characters - // - // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, - // with the corresponding characters - // - // LOWERCASE - // - // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). - // - // URL_DECODE - // - // Use this option to decode a URL-encoded value. - // - // NONE - // - // Specify NONE if you don't want to perform any text transformations. +// GoString returns the string representation +func (s GetRateBasedRuleManagedKeysOutput) GoString() string { + return s.String() +} + +// SetManagedKeys sets the ManagedKeys field's value. +func (s *GetRateBasedRuleManagedKeysOutput) SetManagedKeys(v []*string) *GetRateBasedRuleManagedKeysOutput { + s.ManagedKeys = v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *GetRateBasedRuleManagedKeysOutput) SetNextMarker(v string) *GetRateBasedRuleManagedKeysOutput { + s.NextMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleResponse +type GetRateBasedRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the RateBasedRule that you specified in the GetRateBasedRule + // request. 
+ Rule *RateBasedRule `type:"structure"` +} + +// String returns the string representation +func (s GetRateBasedRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRateBasedRuleOutput) GoString() string { + return s.String() +} + +// SetRule sets the Rule field's value. +func (s *GetRateBasedRuleOutput) SetRule(v *RateBasedRule) *GetRateBasedRuleOutput { + s.Rule = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexMatchSetRequest +type GetRegexMatchSetInput struct { + _ struct{} `type:"structure"` + + // The RegexMatchSetId of the RegexMatchSet that you want to get. RegexMatchSetId + // is returned by CreateRegexMatchSet and by ListRegexMatchSets. // - // TextTransformation is a required field - TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` + // RegexMatchSetId is a required field + RegexMatchSetId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ByteMatchTuple) String() string { +func (s GetRegexMatchSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ByteMatchTuple) GoString() string { +func (s GetRegexMatchSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ByteMatchTuple) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ByteMatchTuple"} - if s.FieldToMatch == nil { - invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) +func (s *GetRegexMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRegexMatchSetInput"} + if s.RegexMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexMatchSetId")) } - if s.PositionalConstraint == nil { - invalidParams.Add(request.NewErrParamRequired("PositionalConstraint")) + if s.RegexMatchSetId != nil && len(*s.RegexMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexMatchSetId", 1)) } - if s.TargetString == nil { - invalidParams.Add(request.NewErrParamRequired("TargetString")) + + if invalidParams.Len() > 0 { + return invalidParams } - if s.TextTransformation == nil { - invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + return nil +} + +// SetRegexMatchSetId sets the RegexMatchSetId field's value. +func (s *GetRegexMatchSetInput) SetRegexMatchSetId(v string) *GetRegexMatchSetInput { + s.RegexMatchSetId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexMatchSetResponse +type GetRegexMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the RegexMatchSet that you specified in the GetRegexMatchSet + // request. For more information, see RegexMatchTuple. + RegexMatchSet *RegexMatchSet `type:"structure"` +} + +// String returns the string representation +func (s GetRegexMatchSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRegexMatchSetOutput) GoString() string { + return s.String() +} + +// SetRegexMatchSet sets the RegexMatchSet field's value. 
+func (s *GetRegexMatchSetOutput) SetRegexMatchSet(v *RegexMatchSet) *GetRegexMatchSetOutput { + s.RegexMatchSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexPatternSetRequest +type GetRegexPatternSetInput struct { + _ struct{} `type:"structure"` + + // The RegexPatternSetId of the RegexPatternSet that you want to get. RegexPatternSetId + // is returned by CreateRegexPatternSet and by ListRegexPatternSets. + // + // RegexPatternSetId is a required field + RegexPatternSetId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRegexPatternSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRegexPatternSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRegexPatternSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRegexPatternSetInput"} + if s.RegexPatternSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexPatternSetId")) } - if s.FieldToMatch != nil { - if err := s.FieldToMatch.Validate(); err != nil { - invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) - } + if s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexPatternSetId", 1)) } if invalidParams.Len() > 0 { @@ -6169,70 +10963,187 @@ func (s *ByteMatchTuple) Validate() error { return nil } -// SetFieldToMatch sets the FieldToMatch field's value. -func (s *ByteMatchTuple) SetFieldToMatch(v *FieldToMatch) *ByteMatchTuple { - s.FieldToMatch = v +// SetRegexPatternSetId sets the RegexPatternSetId field's value. +func (s *GetRegexPatternSetInput) SetRegexPatternSetId(v string) *GetRegexPatternSetInput { + s.RegexPatternSetId = &v return s } -// SetPositionalConstraint sets the PositionalConstraint field's value. -func (s *ByteMatchTuple) SetPositionalConstraint(v string) *ByteMatchTuple { - s.PositionalConstraint = &v +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRegexPatternSetResponse +type GetRegexPatternSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the RegexPatternSet that you specified in the GetRegexPatternSet + // request, including the identifier of the pattern set and the regular expression + // patterns you want AWS WAF to search for. + RegexPatternSet *RegexPatternSet `type:"structure"` +} + +// String returns the string representation +func (s GetRegexPatternSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRegexPatternSetOutput) GoString() string { + return s.String() +} + +// SetRegexPatternSet sets the RegexPatternSet field's value. +func (s *GetRegexPatternSetOutput) SetRegexPatternSet(v *RegexPatternSet) *GetRegexPatternSetOutput { + s.RegexPatternSet = v return s } -// SetTargetString sets the TargetString field's value. -func (s *ByteMatchTuple) SetTargetString(v []byte) *ByteMatchTuple { - s.TargetString = v +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRuleRequest +type GetRuleInput struct { + _ struct{} `type:"structure"` + + // The RuleId of the Rule that you want to get. RuleId is returned by CreateRule + // and by ListRules. 
+ // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRuleInput"} + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleId sets the RuleId field's value. +func (s *GetRuleInput) SetRuleId(v string) *GetRuleInput { + s.RuleId = &v return s } -// SetTextTransformation sets the TextTransformation field's value. -func (s *ByteMatchTuple) SetTextTransformation(v string) *ByteMatchTuple { - s.TextTransformation = &v +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRuleResponse +type GetRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the Rule that you specified in the GetRule request. For + // more information, see the following topics: + // + // * Rule: Contains MetricName, Name, an array of Predicate objects, and + // RuleId + // + // * Predicate: Each Predicate object contains DataId, Negated, and Type + Rule *Rule `type:"structure"` +} + +// String returns the string representation +func (s GetRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRuleOutput) GoString() string { + return s.String() +} + +// SetRule sets the Rule field's value. +func (s *GetRuleOutput) SetRule(v *Rule) *GetRuleOutput { + s.Rule = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateByteMatchSetRequest -type CreateByteMatchSetInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequestsRequest +type GetSampledRequestsInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. + // The number of requests that you want AWS WAF to return from among the first + // 5,000 requests that your AWS resource received during the time range. If + // your resource received fewer requests than the value of MaxItems, GetSampledRequests + // returns information about all of them. // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // MaxItems is a required field + MaxItems *int64 `min:"1" type:"long" required:"true"` - // A friendly name or description of the ByteMatchSet. You can't change Name - // after you create a ByteMatchSet. + // RuleId is one of two values: // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // * The RuleId of the Rule for which you want GetSampledRequests to return + // a sample of requests. + // + // * Default_Action, which causes GetSampledRequests to return a sample of + // the requests that didn't match any of the rules in the specified WebACL. 
+ // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` + + // The start date and time and the end date and time of the range for which + // you want GetSampledRequests to return a sample of requests. Specify the date + // and time in the following format: "2016-09-27T14:50Z". You can specify any + // time range in the previous three hours. + // + // TimeWindow is a required field + TimeWindow *TimeWindow `type:"structure" required:"true"` + + // The WebACLId of the WebACL for which you want GetSampledRequests to return + // a sample of requests. + // + // WebAclId is a required field + WebAclId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateByteMatchSetInput) String() string { +func (s GetSampledRequestsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateByteMatchSetInput) GoString() string { +func (s GetSampledRequestsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateByteMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateByteMatchSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) +func (s *GetSampledRequestsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSampledRequestsInput"} + if s.MaxItems == nil { + invalidParams.Add(request.NewErrParamRequired("MaxItems")) } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + if s.TimeWindow == nil { + invalidParams.Add(request.NewErrParamRequired("TimeWindow")) + } + if s.WebAclId == nil { + invalidParams.Add(request.NewErrParamRequired("WebAclId")) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.WebAclId != nil && len(*s.WebAclId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebAclId", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.TimeWindow != nil { + if err := s.TimeWindow.Validate(); err != nil { + invalidParams.AddNested("TimeWindow", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6241,93 +11152,107 @@ func (s *CreateByteMatchSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateByteMatchSetInput) SetChangeToken(v string) *CreateByteMatchSetInput { - s.ChangeToken = &v +// SetMaxItems sets the MaxItems field's value. +func (s *GetSampledRequestsInput) SetMaxItems(v int64) *GetSampledRequestsInput { + s.MaxItems = &v return s } -// SetName sets the Name field's value. -func (s *CreateByteMatchSetInput) SetName(v string) *CreateByteMatchSetInput { - s.Name = &v +// SetRuleId sets the RuleId field's value. +func (s *GetSampledRequestsInput) SetRuleId(v string) *GetSampledRequestsInput { + s.RuleId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateByteMatchSetResponse -type CreateByteMatchSetOutput struct { +// SetTimeWindow sets the TimeWindow field's value. 
+func (s *GetSampledRequestsInput) SetTimeWindow(v *TimeWindow) *GetSampledRequestsInput { + s.TimeWindow = v + return s +} + +// SetWebAclId sets the WebAclId field's value. +func (s *GetSampledRequestsInput) SetWebAclId(v string) *GetSampledRequestsInput { + s.WebAclId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequestsResponse +type GetSampledRequestsOutput struct { _ struct{} `type:"structure"` - // A ByteMatchSet that contains no ByteMatchTuple objects. - ByteMatchSet *ByteMatchSet `type:"structure"` + // The total number of requests from which GetSampledRequests got a sample of + // MaxItems requests. If PopulationSize is less than MaxItems, the sample includes + // every request that your AWS resource received during the specified time range. + PopulationSize *int64 `type:"long"` - // The ChangeToken that you used to submit the CreateByteMatchSet request. You - // can also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // A complex type that contains detailed information about each of the requests + // in the sample. + SampledRequests []*SampledHTTPRequest `type:"list"` + + // Usually, TimeWindow is the time range that you specified in the GetSampledRequests + // request. However, if your AWS resource received more than 5,000 requests + // during the time range that you specified in the request, GetSampledRequests + // returns the time range for the first 5,000 requests. + TimeWindow *TimeWindow `type:"structure"` } // String returns the string representation -func (s CreateByteMatchSetOutput) String() string { +func (s GetSampledRequestsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateByteMatchSetOutput) GoString() string { +func (s GetSampledRequestsOutput) GoString() string { return s.String() } -// SetByteMatchSet sets the ByteMatchSet field's value. -func (s *CreateByteMatchSetOutput) SetByteMatchSet(v *ByteMatchSet) *CreateByteMatchSetOutput { - s.ByteMatchSet = v +// SetPopulationSize sets the PopulationSize field's value. +func (s *GetSampledRequestsOutput) SetPopulationSize(v int64) *GetSampledRequestsOutput { + s.PopulationSize = &v return s } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateByteMatchSetOutput) SetChangeToken(v string) *CreateByteMatchSetOutput { - s.ChangeToken = &v +// SetSampledRequests sets the SampledRequests field's value. +func (s *GetSampledRequestsOutput) SetSampledRequests(v []*SampledHTTPRequest) *GetSampledRequestsOutput { + s.SampledRequests = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateIPSetRequest -type CreateIPSetInput struct { - _ struct{} `type:"structure"` +// SetTimeWindow sets the TimeWindow field's value. +func (s *GetSampledRequestsOutput) SetTimeWindow(v *TimeWindow) *GetSampledRequestsOutput { + s.TimeWindow = v + return s +} - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSetRequest +type GetSizeConstraintSetInput struct { + _ struct{} `type:"structure"` - // A friendly name or description of the IPSet. You can't change Name after - // you create the IPSet. 
+ // The SizeConstraintSetId of the SizeConstraintSet that you want to get. SizeConstraintSetId + // is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // SizeConstraintSetId is a required field + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateIPSetInput) String() string { +func (s GetSizeConstraintSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateIPSetInput) GoString() string { +func (s GetSizeConstraintSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateIPSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateIPSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *GetSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSizeConstraintSetInput"} + if s.SizeConstraintSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) } if invalidParams.Len() > 0 { @@ -6336,256 +11261,153 @@ func (s *CreateIPSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateIPSetInput) SetChangeToken(v string) *CreateIPSetInput { - s.ChangeToken = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateIPSetInput) SetName(v string) *CreateIPSetInput { - s.Name = &v +// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. +func (s *GetSizeConstraintSetInput) SetSizeConstraintSetId(v string) *GetSizeConstraintSetInput { + s.SizeConstraintSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateIPSetResponse -type CreateIPSetOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSetResponse +type GetSizeConstraintSetOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateIPSet request. You can - // also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` - - // The IPSet returned in the CreateIPSet response. - IPSet *IPSet `type:"structure"` + // Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet + // request. For more information, see the following topics: + // + // * SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and + // Name + // + // * SizeConstraints: Contains an array of SizeConstraint objects. 
Each SizeConstraint + // object contains FieldToMatch, TextTransformation, ComparisonOperator, + // and Size + // + // * FieldToMatch: Contains Data and Type + SizeConstraintSet *SizeConstraintSet `type:"structure"` } // String returns the string representation -func (s CreateIPSetOutput) String() string { +func (s GetSizeConstraintSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateIPSetOutput) GoString() string { +func (s GetSizeConstraintSetOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateIPSetOutput) SetChangeToken(v string) *CreateIPSetOutput { - s.ChangeToken = &v - return s -} - -// SetIPSet sets the IPSet field's value. -func (s *CreateIPSetOutput) SetIPSet(v *IPSet) *CreateIPSetOutput { - s.IPSet = v +// SetSizeConstraintSet sets the SizeConstraintSet field's value. +func (s *GetSizeConstraintSetOutput) SetSizeConstraintSet(v *SizeConstraintSet) *GetSizeConstraintSetOutput { + s.SizeConstraintSet = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRateBasedRuleRequest -type CreateRateBasedRuleInput struct { +// A request to get a SqlInjectionMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSetRequest +type GetSqlInjectionMatchSetInput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateRateBasedRule request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` - - // A friendly name or description for the metrics for this RateBasedRule. The - // name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RateBasedRule. - // - // MetricName is a required field - MetricName *string `type:"string" required:"true"` - - // A friendly name or description of the RateBasedRule. You can't change the - // name of a RateBasedRule after you create it. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The field that AWS WAF uses to determine if requests are likely arriving - // from a single source and thus subject to rate monitoring. The only valid - // value for RateKey is IP. IP indicates that requests that arrive from the - // same IP address are subject to the RateLimit that is specified in the RateBasedRule. - // - // RateKey is a required field - RateKey *string `type:"string" required:"true" enum:"RateKey"` - - // The maximum number of requests, which have an identical value in the field - // that is specified by RateKey, allowed in a five-minute period. If the number - // of requests exceeds the RateLimit and the other predicates specified in the - // rule are also met, AWS WAF triggers the action that is specified for this - // rule. + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. 
// - // RateLimit is a required field - RateLimit *int64 `min:"2000" type:"long" required:"true"` -} - -// String returns the string representation -func (s CreateRateBasedRuleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateRateBasedRuleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateRateBasedRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateRateBasedRuleInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.MetricName == nil { - invalidParams.Add(request.NewErrParamRequired("MetricName")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.RateKey == nil { - invalidParams.Add(request.NewErrParamRequired("RateKey")) - } - if s.RateLimit == nil { - invalidParams.Add(request.NewErrParamRequired("RateLimit")) - } - if s.RateLimit != nil && *s.RateLimit < 2000 { - invalidParams.Add(request.NewErrParamMinValue("RateLimit", 2000)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil + // SqlInjectionMatchSetId is a required field + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateRateBasedRuleInput) SetChangeToken(v string) *CreateRateBasedRuleInput { - s.ChangeToken = &v - return s +// String returns the string representation +func (s GetSqlInjectionMatchSetInput) String() string { + return awsutil.Prettify(s) } -// SetMetricName sets the MetricName field's value. -func (s *CreateRateBasedRuleInput) SetMetricName(v string) *CreateRateBasedRuleInput { - s.MetricName = &v - return s +// GoString returns the string representation +func (s GetSqlInjectionMatchSetInput) GoString() string { + return s.String() } -// SetName sets the Name field's value. -func (s *CreateRateBasedRuleInput) SetName(v string) *CreateRateBasedRuleInput { - s.Name = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSqlInjectionMatchSetInput"} + if s.SqlInjectionMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) + } + if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + } -// SetRateKey sets the RateKey field's value. -func (s *CreateRateBasedRuleInput) SetRateKey(v string) *CreateRateBasedRuleInput { - s.RateKey = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRateLimit sets the RateLimit field's value. -func (s *CreateRateBasedRuleInput) SetRateLimit(v int64) *CreateRateBasedRuleInput { - s.RateLimit = &v +// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. 
+func (s *GetSqlInjectionMatchSetInput) SetSqlInjectionMatchSetId(v string) *GetSqlInjectionMatchSetInput { + s.SqlInjectionMatchSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRateBasedRuleResponse -type CreateRateBasedRuleOutput struct { +// The response to a GetSqlInjectionMatchSet request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSetResponse +type GetSqlInjectionMatchSetOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateRateBasedRule request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` - - // The RateBasedRule that is returned in the CreateRateBasedRule response. - Rule *RateBasedRule `type:"structure"` + // Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet + // request. For more information, see the following topics: + // + // * SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and an + // array of SqlInjectionMatchTuple objects + // + // * SqlInjectionMatchTuple: Each SqlInjectionMatchTuple object contains + // FieldToMatch and TextTransformation + // + // * FieldToMatch: Contains Data and Type + SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` } // String returns the string representation -func (s CreateRateBasedRuleOutput) String() string { +func (s GetSqlInjectionMatchSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRateBasedRuleOutput) GoString() string { +func (s GetSqlInjectionMatchSetOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateRateBasedRuleOutput) SetChangeToken(v string) *CreateRateBasedRuleOutput { - s.ChangeToken = &v - return s -} - -// SetRule sets the Rule field's value. -func (s *CreateRateBasedRuleOutput) SetRule(v *RateBasedRule) *CreateRateBasedRuleOutput { - s.Rule = v +// SetSqlInjectionMatchSet sets the SqlInjectionMatchSet field's value. +func (s *GetSqlInjectionMatchSetOutput) SetSqlInjectionMatchSet(v *SqlInjectionMatchSet) *GetSqlInjectionMatchSetOutput { + s.SqlInjectionMatchSet = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRuleRequest -type CreateRuleInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACLRequest +type GetWebACLInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` - - // A friendly name or description for the metrics for this Rule. The name can - // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain - // whitespace. You can't change the name of the metric after you create the - // Rule. - // - // MetricName is a required field - MetricName *string `type:"string" required:"true"` - - // A friendly name or description of the Rule. You can't change the name of - // a Rule after you create it. + // The WebACLId of the WebACL that you want to get. WebACLId is returned by + // CreateWebACL and by ListWebACLs. 
// - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateRuleInput) String() string { +func (s GetWebACLInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRuleInput) GoString() string { +func (s GetWebACLInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateRuleInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.MetricName == nil { - invalidParams.Add(request.NewErrParamRequired("MetricName")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *GetWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWebACLInput"} + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) } if invalidParams.Len() > 0 { @@ -6594,316 +11416,364 @@ func (s *CreateRuleInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateRuleInput) SetChangeToken(v string) *CreateRuleInput { - s.ChangeToken = &v - return s -} - -// SetMetricName sets the MetricName field's value. -func (s *CreateRuleInput) SetMetricName(v string) *CreateRuleInput { - s.MetricName = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateRuleInput) SetName(v string) *CreateRuleInput { - s.Name = &v +// SetWebACLId sets the WebACLId field's value. +func (s *GetWebACLInput) SetWebACLId(v string) *GetWebACLInput { + s.WebACLId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateRuleResponse -type CreateRuleOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACLResponse +type GetWebACLOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateRule request. You can also - // use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` - - // The Rule returned in the CreateRule response. - Rule *Rule `type:"structure"` + // Information about the WebACL that you specified in the GetWebACL request. 
+ // For more information, see the following topics: + // + // * WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, + // and WebACLId + // + // * DefaultAction (Data type is WafAction): Contains Type + // + // * Rules: Contains an array of ActivatedRule objects, which contain Action, + // Priority, and RuleId + // + // * Action: Contains Type + WebACL *WebACL `type:"structure"` } // String returns the string representation -func (s CreateRuleOutput) String() string { +func (s GetWebACLOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRuleOutput) GoString() string { +func (s GetWebACLOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateRuleOutput) SetChangeToken(v string) *CreateRuleOutput { - s.ChangeToken = &v - return s -} - -// SetRule sets the Rule field's value. -func (s *CreateRuleOutput) SetRule(v *Rule) *CreateRuleOutput { - s.Rule = v +// SetWebACL sets the WebACL field's value. +func (s *GetWebACLOutput) SetWebACL(v *WebACL) *GetWebACLOutput { + s.WebACL = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSetRequest -type CreateSizeConstraintSetInput struct { +// A request to get an XssMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSetRequest +type GetXssMatchSetInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` - - // A friendly name or description of the SizeConstraintSet. You can't change - // Name after you create a SizeConstraintSet. + // The XssMatchSetId of the XssMatchSet that you want to get. XssMatchSetId + // is returned by CreateXssMatchSet and by ListXssMatchSets. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // XssMatchSetId is a required field + XssMatchSetId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateSizeConstraintSetInput) String() string { +func (s GetXssMatchSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSizeConstraintSetInput) GoString() string { +func (s GetXssMatchSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateSizeConstraintSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSizeConstraintSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) +func (s *GetXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetXssMatchSetInput"} + if s.XssMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + + if invalidParams.Len() > 0 { + return invalidParams } + return nil +} + +// SetXssMatchSetId sets the XssMatchSetId field's value. +func (s *GetXssMatchSetInput) SetXssMatchSetId(v string) *GetXssMatchSetInput { + s.XssMatchSetId = &v + return s +} + +// The response to a GetXssMatchSet request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSetResponse +type GetXssMatchSetOutput struct { + _ struct{} `type:"structure"` + + // Information about the XssMatchSet that you specified in the GetXssMatchSet + // request. For more information, see the following topics: + // + // * XssMatchSet: Contains Name, XssMatchSetId, and an array of XssMatchTuple + // objects + // + // * XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and TextTransformation + // + // * FieldToMatch: Contains Data and Type + XssMatchSet *XssMatchSet `type:"structure"` +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// String returns the string representation +func (s GetXssMatchSetOutput) String() string { + return awsutil.Prettify(s) } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateSizeConstraintSetInput) SetChangeToken(v string) *CreateSizeConstraintSetInput { - s.ChangeToken = &v - return s +// GoString returns the string representation +func (s GetXssMatchSetOutput) GoString() string { + return s.String() } -// SetName sets the Name field's value. -func (s *CreateSizeConstraintSetInput) SetName(v string) *CreateSizeConstraintSetInput { - s.Name = &v +// SetXssMatchSet sets the XssMatchSet field's value. +func (s *GetXssMatchSetOutput) SetXssMatchSet(v *XssMatchSet) *GetXssMatchSetOutput { + s.XssMatchSet = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSizeConstraintSetResponse -type CreateSizeConstraintSetOutput struct { +// The response from a GetSampledRequests request includes an HTTPHeader complex +// type that appears as Headers in the response syntax. HTTPHeader contains +// the names and values of all of the headers that appear in one of the web +// requests that were returned by GetSampledRequests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/HTTPHeader +type HTTPHeader struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateSizeConstraintSet request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // The name of one of the headers in the sampled web request. 
+ Name *string `type:"string"` - // A SizeConstraintSet that contains no SizeConstraint objects. - SizeConstraintSet *SizeConstraintSet `type:"structure"` + // The value of one of the headers in the sampled web request. + Value *string `type:"string"` } // String returns the string representation -func (s CreateSizeConstraintSetOutput) String() string { +func (s HTTPHeader) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSizeConstraintSetOutput) GoString() string { +func (s HTTPHeader) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateSizeConstraintSetOutput) SetChangeToken(v string) *CreateSizeConstraintSetOutput { - s.ChangeToken = &v +// SetName sets the Name field's value. +func (s *HTTPHeader) SetName(v string) *HTTPHeader { + s.Name = &v return s } -// SetSizeConstraintSet sets the SizeConstraintSet field's value. -func (s *CreateSizeConstraintSetOutput) SetSizeConstraintSet(v *SizeConstraintSet) *CreateSizeConstraintSetOutput { - s.SizeConstraintSet = v +// SetValue sets the Value field's value. +func (s *HTTPHeader) SetValue(v string) *HTTPHeader { + s.Value = &v return s } -// A request to create a SqlInjectionMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSetRequest -type CreateSqlInjectionMatchSetInput struct { +// The response from a GetSampledRequests request includes an HTTPRequest complex +// type that appears as Request in the response syntax. HTTPRequest contains +// information about one of the web requests that were returned by GetSampledRequests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/HTTPRequest +type HTTPRequest struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. + // The IP address that the request originated from. If the WebACL is associated + // with a CloudFront distribution, this is the value of one of the following + // fields in CloudFront access logs: // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` - - // A friendly name or description for the SqlInjectionMatchSet that you're creating. - // You can't change Name after you create the SqlInjectionMatchSet. + // * c-ip, if the viewer did not use an HTTP proxy or a load balancer to + // send the request // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // * x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer + // to send the request + ClientIP *string `type:"string"` + + // The two-letter country code for the country that the request originated from. + // For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2 + // (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). + Country *string `type:"string"` + + // The HTTP version specified in the sampled web request, for example, HTTP/1.1. + HTTPVersion *string `type:"string"` + + // A complex type that contains two values for each header in the sampled web + // request: the name of the header and the value of the header. + Headers []*HTTPHeader `type:"list"` + + // The HTTP method specified in the sampled web request. CloudFront supports + // the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT. + Method *string `type:"string"` + + // The part of a web request that identifies the resource, for example, /images/daily-ad.jpg. 
+ URI *string `type:"string"`
 }

 // String returns the string representation
-func (s CreateSqlInjectionMatchSetInput) String() string {
+func (s HTTPRequest) String() string {
 return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateSqlInjectionMatchSetInput) GoString() string {
+func (s HTTPRequest) GoString() string {
 return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateSqlInjectionMatchSetInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateSqlInjectionMatchSetInput"}
- if s.ChangeToken == nil {
- invalidParams.Add(request.NewErrParamRequired("ChangeToken"))
- }
- if s.ChangeToken != nil && len(*s.ChangeToken) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1))
- }
- if s.Name == nil {
- invalidParams.Add(request.NewErrParamRequired("Name"))
- }
- if s.Name != nil && len(*s.Name) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Name", 1))
- }
+// SetClientIP sets the ClientIP field's value.
+func (s *HTTPRequest) SetClientIP(v string) *HTTPRequest {
+ s.ClientIP = &v
+ return s
+}

- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
+// SetCountry sets the Country field's value.
+func (s *HTTPRequest) SetCountry(v string) *HTTPRequest {
+ s.Country = &v
+ return s
}

-// SetChangeToken sets the ChangeToken field's value.
-func (s *CreateSqlInjectionMatchSetInput) SetChangeToken(v string) *CreateSqlInjectionMatchSetInput {
- s.ChangeToken = &v
+// SetHTTPVersion sets the HTTPVersion field's value.
+func (s *HTTPRequest) SetHTTPVersion(v string) *HTTPRequest {
+ s.HTTPVersion = &v
 return s
 }

-// SetName sets the Name field's value.
-func (s *CreateSqlInjectionMatchSetInput) SetName(v string) *CreateSqlInjectionMatchSetInput {
- s.Name = &v
+// SetHeaders sets the Headers field's value.
+func (s *HTTPRequest) SetHeaders(v []*HTTPHeader) *HTTPRequest {
+ s.Headers = v
 return s
 }

-// The response to a CreateSqlInjectionMatchSet request.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateSqlInjectionMatchSetResponse
-type CreateSqlInjectionMatchSetOutput struct {
+// SetMethod sets the Method field's value.
+func (s *HTTPRequest) SetMethod(v string) *HTTPRequest {
+ s.Method = &v
+ return s
+}
+
+// SetURI sets the URI field's value.
+func (s *HTTPRequest) SetURI(v string) *HTTPRequest {
+ s.URI = &v
+ return s
+}
+
+// Contains one or more IP addresses or blocks of IP addresses specified in
+// Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports /8, /16,
+// /24, and /32 IP address ranges for IPv4, and /24, /32, /48, /56, /64 and
+// /128 for IPv6.
+//
+// To specify an individual IP address, you specify the four-part IP address
+// followed by a /32, for example, 192.0.2.0/32. To block a range of IP addresses,
+// you can specify a /128, /64, /56, /48, /32, /24, /16, or /8 CIDR. For more
+// information about CIDR notation, see the Wikipedia entry Classless Inter-Domain
+// Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSet
+type IPSet struct {
 _ struct{} `type:"structure"`

- // The ChangeToken that you used to submit the CreateSqlInjectionMatchSet request.
- // You can also use this value to query the status of the request. For more
- // information, see GetChangeTokenStatus.
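Another editorial aside: the HTTPHeader and HTTPRequest shapes above only ever appear inside GetSampledRequests output. A hedged sketch of walking them, assuming placeholder WebACL and rule IDs; note the generated input field for this operation is spelled WebAclId.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	end := time.Now()
	out, err := svc.GetSampledRequests(&waf.GetSampledRequestsInput{
		WebAclId: aws.String("example-web-acl-id"), // placeholder
		RuleId:   aws.String("example-rule-id"),    // placeholder
		MaxItems: aws.Int64(100),
		TimeWindow: &waf.TimeWindow{
			StartTime: aws.Time(end.Add(-time.Hour)),
			EndTime:   aws.Time(end),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each sampled request wraps an HTTPRequest like the type defined above.
	for _, sr := range out.SampledRequests {
		req := sr.Request
		fmt.Println(aws.StringValue(req.ClientIP), aws.StringValue(req.Method), aws.StringValue(req.URI))
		for _, h := range req.Headers {
			fmt.Printf("  %s: %s\n", aws.StringValue(h.Name), aws.StringValue(h.Value))
		}
	}
}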
- ChangeToken *string `min:"1" type:"string"` + // The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) + // that web requests originate from. If the WebACL is associated with a CloudFront + // distribution and the viewer did not use an HTTP proxy or a load balancer + // to send the request, this is the value of the c-ip field in the CloudFront + // access logs. + // + // IPSetDescriptors is a required field + IPSetDescriptors []*IPSetDescriptor `type:"list" required:"true"` - // A SqlInjectionMatchSet. - SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"` + // The IPSetId for an IPSet. You use IPSetId to get information about an IPSet + // (see GetIPSet), update an IPSet (see UpdateIPSet), insert an IPSet into a + // Rule or delete one from a Rule (see UpdateRule), and delete an IPSet from + // AWS WAF (see DeleteIPSet). + // + // IPSetId is returned by CreateIPSet and by ListIPSets. + // + // IPSetId is a required field + IPSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change the name of + // an IPSet after you create it. + Name *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateSqlInjectionMatchSetOutput) String() string { +func (s IPSet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSqlInjectionMatchSetOutput) GoString() string { +func (s IPSet) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateSqlInjectionMatchSetOutput) SetChangeToken(v string) *CreateSqlInjectionMatchSetOutput { - s.ChangeToken = &v +// SetIPSetDescriptors sets the IPSetDescriptors field's value. +func (s *IPSet) SetIPSetDescriptors(v []*IPSetDescriptor) *IPSet { + s.IPSetDescriptors = v return s } -// SetSqlInjectionMatchSet sets the SqlInjectionMatchSet field's value. -func (s *CreateSqlInjectionMatchSetOutput) SetSqlInjectionMatchSet(v *SqlInjectionMatchSet) *CreateSqlInjectionMatchSetOutput { - s.SqlInjectionMatchSet = v +// SetIPSetId sets the IPSetId field's value. +func (s *IPSet) SetIPSetId(v string) *IPSet { + s.IPSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACLRequest -type CreateWebACLInput struct { +// SetName sets the Name field's value. +func (s *IPSet) SetName(v string) *IPSet { + s.Name = &v + return s +} + +// Specifies the IP address type (IPV4 or IPV6) and the IP address range (in +// CIDR format) that web requests originate from. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSetDescriptor +type IPSetDescriptor struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. + // Specify IPV4 or IPV6. // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Type is a required field + Type *string `type:"string" required:"true" enum:"IPSetDescriptorType"` - // The action that you want AWS WAF to take when a request doesn't match the - // criteria specified in any of the Rule objects that are associated with the - // WebACL. + // Specify an IPv4 address by using CIDR notation. For example: // - // DefaultAction is a required field - DefaultAction *WafAction `type:"structure" required:"true"` - - // A friendly name or description for the metrics for this WebACL. 
The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change MetricName after you create the WebACL. + // * To configure AWS WAF to allow, block, or count requests that originated + // from the IP address 192.0.2.44, specify 192.0.2.44/32. // - // MetricName is a required field - MetricName *string `type:"string" required:"true"` - - // A friendly name or description of the WebACL. You can't change Name after - // you create the WebACL. + // * To configure AWS WAF to allow, block, or count requests that originated + // from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24. + // + // For more information about CIDR notation, see the Wikipedia entry Classless + // Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). + // + // Specify an IPv6 address by using CIDR notation. For example: + // + // * To configure AWS WAF to allow, block, or count requests that originated + // from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // * To configure AWS WAF to allow, block, or count requests that originated + // from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, + // specify 1111:0000:0000:0000:0000:0000:0000:0000/64. + // + // Value is a required field + Value *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateWebACLInput) String() string { +func (s IPSetDescriptor) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateWebACLInput) GoString() string { +func (s IPSetDescriptor) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateWebACLInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.DefaultAction == nil { - invalidParams.Add(request.NewErrParamRequired("DefaultAction")) - } - if s.MetricName == nil { - invalidParams.Add(request.NewErrParamRequired("MetricName")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *IPSetDescriptor) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IPSetDescriptor"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } - if s.DefaultAction != nil { - if err := s.DefaultAction.Validate(); err != nil { - invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) - } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) } if invalidParams.Len() > 0 { @@ -6912,106 +11782,151 @@ func (s *CreateWebACLInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateWebACLInput) SetChangeToken(v string) *CreateWebACLInput { - s.ChangeToken = &v +// SetType sets the Type field's value. +func (s *IPSetDescriptor) SetType(v string) *IPSetDescriptor { + s.Type = &v return s } -// SetDefaultAction sets the DefaultAction field's value. 
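A short sketch (editor-added, not part of the diff) of building IPSetDescriptor values that follow the CIDR rules documented above; the Ipv4/Ipv6 enum constants come from this same generated package.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	descriptors := []*waf.IPSetDescriptor{
		// A single IPv4 address is the address followed by /32.
		{Type: aws.String(waf.IPSetDescriptorTypeIpv4), Value: aws.String("192.0.2.44/32")},
		// 192.0.2.0/24 covers 192.0.2.0 through 192.0.2.255.
		{Type: aws.String(waf.IPSetDescriptorTypeIpv4), Value: aws.String("192.0.2.0/24")},
		// A single IPv6 address uses /128.
		{Type: aws.String(waf.IPSetDescriptorTypeIpv6), Value: aws.String("1111:0000:0000:0000:0000:0000:0000:0111/128")},
	}
	for _, d := range descriptors {
		// Validate runs the same client-side checks shown in this diff.
		if err := d.Validate(); err != nil {
			fmt.Println("invalid descriptor:", err)
		}
	}
}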
-func (s *CreateWebACLInput) SetDefaultAction(v *WafAction) *CreateWebACLInput { - s.DefaultAction = v +// SetValue sets the Value field's value. +func (s *IPSetDescriptor) SetValue(v string) *IPSetDescriptor { + s.Value = &v return s } -// SetMetricName sets the MetricName field's value. -func (s *CreateWebACLInput) SetMetricName(v string) *CreateWebACLInput { - s.MetricName = &v +// Contains the identifier and the name of the IPSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSetSummary +type IPSetSummary struct { + _ struct{} `type:"structure"` + + // The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get + // detailed information about an IPSet. + // + // IPSetId is a required field + IPSetId *string `min:"1" type:"string" required:"true"` + + // A friendly name or description of the IPSet. You can't change the name of + // an IPSet after you create it. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s IPSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetSummary) GoString() string { + return s.String() +} + +// SetIPSetId sets the IPSetId field's value. +func (s *IPSetSummary) SetIPSetId(v string) *IPSetSummary { + s.IPSetId = &v return s } // SetName sets the Name field's value. -func (s *CreateWebACLInput) SetName(v string) *CreateWebACLInput { +func (s *IPSetSummary) SetName(v string) *IPSetSummary { s.Name = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACLResponse -type CreateWebACLOutput struct { +// Specifies the type of update to perform to an IPSet with UpdateIPSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSetUpdate +type IPSetUpdate struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateWebACL request. You can - // also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // Specifies whether to insert or delete an IP address with UpdateIPSet. + // + // Action is a required field + Action *string `type:"string" required:"true" enum:"ChangeAction"` - // The WebACL returned in the CreateWebACL response. - WebACL *WebACL `type:"structure"` + // The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) + // that web requests originate from. + // + // IPSetDescriptor is a required field + IPSetDescriptor *IPSetDescriptor `type:"structure" required:"true"` } // String returns the string representation -func (s CreateWebACLOutput) String() string { +func (s IPSetUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateWebACLOutput) GoString() string { +func (s IPSetUpdate) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateWebACLOutput) SetChangeToken(v string) *CreateWebACLOutput { - s.ChangeToken = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *IPSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IPSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.IPSetDescriptor == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetDescriptor")) + } + if s.IPSetDescriptor != nil { + if err := s.IPSetDescriptor.Validate(); err != nil { + invalidParams.AddNested("IPSetDescriptor", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *IPSetUpdate) SetAction(v string) *IPSetUpdate { + s.Action = &v return s } -// SetWebACL sets the WebACL field's value. -func (s *CreateWebACLOutput) SetWebACL(v *WebACL) *CreateWebACLOutput { - s.WebACL = v +// SetIPSetDescriptor sets the IPSetDescriptor field's value. +func (s *IPSetUpdate) SetIPSetDescriptor(v *IPSetDescriptor) *IPSetUpdate { + s.IPSetDescriptor = v return s } -// A request to create an XssMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSetRequest -type CreateXssMatchSetInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSetsRequest +type ListByteMatchSetsInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of ByteMatchSet objects that you want AWS WAF to return + // for this request. If you have more ByteMatchSets objects than the number + // you specify for Limit, the response includes a NextMarker value that you + // can use to get another batch of ByteMatchSet objects. + Limit *int64 `type:"integer"` - // A friendly name or description for the XssMatchSet that you're creating. - // You can't change Name after you create the XssMatchSet. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more ByteMatchSets than the + // value of Limit, AWS WAF returns a NextMarker value in the response that allows + // you to list another group of ByteMatchSets. For the second and subsequent + // ListByteMatchSets requests, specify the value of NextMarker from the previous + // response to get information about another batch of ByteMatchSets. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateXssMatchSetInput) String() string { +func (s ListByteMatchSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateXssMatchSetInput) GoString() string { +func (s ListByteMatchSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
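IPSetUpdate is only useful together with the change-token workflow that runs through this whole API: fetch a token with GetChangeToken, then pass it to the mutating call. A minimal editor-added sketch, with a placeholder IPSetId:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	// Every mutating WAF call wants the value from the most recent GetChangeToken.
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.UpdateIPSet(&waf.UpdateIPSetInput{
		ChangeToken: tok.ChangeToken,
		IPSetId:     aws.String("example-ipset-id"), // placeholder
		Updates: []*waf.IPSetUpdate{{
			// ChangeActionInsert adds the descriptor; ChangeActionDelete removes it.
			Action: aws.String(waf.ChangeActionInsert),
			IPSetDescriptor: &waf.IPSetDescriptor{
				Type:  aws.String(waf.IPSetDescriptorTypeIpv4),
				Value: aws.String("192.0.2.44/32"),
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}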
-func (s *CreateXssMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateXssMatchSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *ListByteMatchSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListByteMatchSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7020,94 +11935,88 @@ func (s *CreateXssMatchSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateXssMatchSetInput) SetChangeToken(v string) *CreateXssMatchSetInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListByteMatchSetsInput) SetLimit(v int64) *ListByteMatchSetsInput { + s.Limit = &v return s } -// SetName sets the Name field's value. -func (s *CreateXssMatchSetInput) SetName(v string) *CreateXssMatchSetInput { - s.Name = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListByteMatchSetsInput) SetNextMarker(v string) *ListByteMatchSetsInput { + s.NextMarker = &v return s } -// The response to a CreateXssMatchSet request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateXssMatchSetResponse -type CreateXssMatchSetOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSetsResponse +type ListByteMatchSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the CreateXssMatchSet request. You - // can also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // An array of ByteMatchSetSummary objects. + ByteMatchSets []*ByteMatchSetSummary `type:"list"` - // An XssMatchSet. - XssMatchSet *XssMatchSet `type:"structure"` + // If you have more ByteMatchSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more ByteMatchSet objects, submit another ListByteMatchSets request, and + // specify the NextMarker value from the response in the NextMarker value in + // the next request. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateXssMatchSetOutput) String() string { +func (s ListByteMatchSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateXssMatchSetOutput) GoString() string { +func (s ListByteMatchSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *CreateXssMatchSetOutput) SetChangeToken(v string) *CreateXssMatchSetOutput { - s.ChangeToken = &v +// SetByteMatchSets sets the ByteMatchSets field's value. +func (s *ListByteMatchSetsOutput) SetByteMatchSets(v []*ByteMatchSetSummary) *ListByteMatchSetsOutput { + s.ByteMatchSets = v return s } -// SetXssMatchSet sets the XssMatchSet field's value. 
-func (s *CreateXssMatchSetOutput) SetXssMatchSet(v *XssMatchSet) *CreateXssMatchSetOutput { - s.XssMatchSet = v +// SetNextMarker sets the NextMarker field's value. +func (s *ListByteMatchSetsOutput) SetNextMarker(v string) *ListByteMatchSetsOutput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSetRequest -type DeleteByteMatchSetInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListGeoMatchSetsRequest +type ListGeoMatchSetsInput struct { _ struct{} `type:"structure"` - // The ByteMatchSetId of the ByteMatchSet that you want to delete. ByteMatchSetId - // is returned by CreateByteMatchSet and by ListByteMatchSets. - // - // ByteMatchSetId is a required field - ByteMatchSetId *string `min:"1" type:"string" required:"true"` + // Specifies the number of GeoMatchSet objects that you want AWS WAF to return + // for this request. If you have more GeoMatchSet objects than the number you + // specify for Limit, the response includes a NextMarker value that you can + // use to get another batch of GeoMatchSet objects. + Limit *int64 `type:"integer"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more GeoMatchSets than the + // value of Limit, AWS WAF returns a NextMarker value in the response that allows + // you to list another group of GeoMatchSet objects. For the second and subsequent + // ListGeoMatchSets requests, specify the value of NextMarker from the previous + // response to get information about another batch of GeoMatchSet objects. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteByteMatchSetInput) String() string { +func (s ListGeoMatchSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteByteMatchSetInput) GoString() string { +func (s ListGeoMatchSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteByteMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteByteMatchSetInput"} - if s.ByteMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) - } - if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) - } - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) +func (s *ListGeoMatchSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGeoMatchSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7116,84 +12025,88 @@ func (s *DeleteByteMatchSetInput) Validate() error { return nil } -// SetByteMatchSetId sets the ByteMatchSetId field's value. -func (s *DeleteByteMatchSetInput) SetByteMatchSetId(v string) *DeleteByteMatchSetInput { - s.ByteMatchSetId = &v +// SetLimit sets the Limit field's value. +func (s *ListGeoMatchSetsInput) SetLimit(v int64) *ListGeoMatchSetsInput { + s.Limit = &v return s } -// SetChangeToken sets the ChangeToken field's value. 
-func (s *DeleteByteMatchSetInput) SetChangeToken(v string) *DeleteByteMatchSetInput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListGeoMatchSetsInput) SetNextMarker(v string) *ListGeoMatchSetsInput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteByteMatchSetResponse -type DeleteByteMatchSetOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListGeoMatchSetsResponse +type ListGeoMatchSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteByteMatchSet request. You - // can also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // An array of GeoMatchSetSummary objects. + GeoMatchSets []*GeoMatchSetSummary `type:"list"` + + // If you have more GeoMatchSet objects than the number that you specified for + // Limit in the request, the response includes a NextMarker value. To list more + // GeoMatchSet objects, submit another ListGeoMatchSets request, and specify + // the NextMarker value from the response in the NextMarker value in the next + // request. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteByteMatchSetOutput) String() string { +func (s ListGeoMatchSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteByteMatchSetOutput) GoString() string { +func (s ListGeoMatchSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteByteMatchSetOutput) SetChangeToken(v string) *DeleteByteMatchSetOutput { - s.ChangeToken = &v +// SetGeoMatchSets sets the GeoMatchSets field's value. +func (s *ListGeoMatchSetsOutput) SetGeoMatchSets(v []*GeoMatchSetSummary) *ListGeoMatchSetsOutput { + s.GeoMatchSets = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSetRequest -type DeleteIPSetInput struct { +// SetNextMarker sets the NextMarker field's value. +func (s *ListGeoMatchSetsOutput) SetNextMarker(v string) *ListGeoMatchSetsOutput { + s.NextMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSetsRequest +type ListIPSetsInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of IPSet objects that you want AWS WAF to return for + // this request. If you have more IPSet objects than the number you specify + // for Limit, the response includes a NextMarker value that you can use to get + // another batch of IPSet objects. + Limit *int64 `type:"integer"` - // The IPSetId of the IPSet that you want to delete. IPSetId is returned by - // CreateIPSet and by ListIPSets. - // - // IPSetId is a required field - IPSetId *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more IPSets than the value + // of Limit, AWS WAF returns a NextMarker value in the response that allows + // you to list another group of IPSets. For the second and subsequent ListIPSets + // requests, specify the value of NextMarker from the previous response to get + // information about another batch of IPSets. 
+ NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteIPSetInput) String() string { +func (s ListIPSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteIPSetInput) GoString() string { +func (s ListIPSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteIPSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteIPSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.IPSetId == nil { - invalidParams.Add(request.NewErrParamRequired("IPSetId")) - } - if s.IPSetId != nil && len(*s.IPSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) +func (s *ListIPSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIPSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7202,84 +12115,86 @@ func (s *DeleteIPSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteIPSetInput) SetChangeToken(v string) *DeleteIPSetInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListIPSetsInput) SetLimit(v int64) *ListIPSetsInput { + s.Limit = &v return s } -// SetIPSetId sets the IPSetId field's value. -func (s *DeleteIPSetInput) SetIPSetId(v string) *DeleteIPSetInput { - s.IPSetId = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListIPSetsInput) SetNextMarker(v string) *ListIPSetsInput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteIPSetResponse -type DeleteIPSetOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSetsResponse +type ListIPSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteIPSet request. You can - // also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // An array of IPSetSummary objects. + IPSets []*IPSetSummary `type:"list"` + + // If you have more IPSet objects than the number that you specified for Limit + // in the request, the response includes a NextMarker value. To list more IPSet + // objects, submit another ListIPSets request, and specify the NextMarker value + // from the response in the NextMarker value in the next request. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteIPSetOutput) String() string { +func (s ListIPSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteIPSetOutput) GoString() string { +func (s ListIPSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteIPSetOutput) SetChangeToken(v string) *DeleteIPSetOutput { - s.ChangeToken = &v +// SetIPSets sets the IPSets field's value. 
+func (s *ListIPSetsOutput) SetIPSets(v []*IPSetSummary) *ListIPSetsOutput { + s.IPSets = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRuleRequest -type DeleteRateBasedRuleInput struct { +// SetNextMarker sets the NextMarker field's value. +func (s *ListIPSetsOutput) SetNextMarker(v string) *ListIPSetsOutput { + s.NextMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRulesRequest +type ListRateBasedRulesInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of Rules that you want AWS WAF to return for this request. + // If you have more Rules than the number that you specify for Limit, the response + // includes a NextMarker value that you can use to get another batch of Rules. + Limit *int64 `type:"integer"` - // The RuleId of the RateBasedRule that you want to delete. RuleId is returned - // by CreateRateBasedRule and by ListRateBasedRules. - // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more Rules than the value of + // Limit, AWS WAF returns a NextMarker value in the response that allows you + // to list another group of Rules. For the second and subsequent ListRateBasedRules + // requests, specify the value of NextMarker from the previous response to get + // information about another batch of Rules. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteRateBasedRuleInput) String() string { +func (s ListRateBasedRulesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRateBasedRuleInput) GoString() string { +func (s ListRateBasedRulesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRateBasedRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRateBasedRuleInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) +func (s *ListRateBasedRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRateBasedRulesInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7288,84 +12203,88 @@ func (s *DeleteRateBasedRuleInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteRateBasedRuleInput) SetChangeToken(v string) *DeleteRateBasedRuleInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListRateBasedRulesInput) SetLimit(v int64) *ListRateBasedRulesInput { + s.Limit = &v return s } -// SetRuleId sets the RuleId field's value. -func (s *DeleteRateBasedRuleInput) SetRuleId(v string) *DeleteRateBasedRuleInput { - s.RuleId = &v +// SetNextMarker sets the NextMarker field's value. 
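All of the List* inputs in this stretch of the diff share the same Limit/NextMarker pagination contract. An editor-added sketch of the loop the doc comments describe, using ListIPSets as the example:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	input := &waf.ListIPSetsInput{Limit: aws.Int64(50)}
	for {
		out, err := svc.ListIPSets(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range out.IPSets {
			fmt.Println(aws.StringValue(s.IPSetId), aws.StringValue(s.Name))
		}
		// No NextMarker in the response means this was the last batch; otherwise
		// feed it back into the next request, as the field docs describe.
		if out.NextMarker == nil || aws.StringValue(out.NextMarker) == "" {
			break
		}
		input.NextMarker = out.NextMarker
	}
}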
+func (s *ListRateBasedRulesInput) SetNextMarker(v string) *ListRateBasedRulesInput {
+ s.NextMarker = &v
 return s
 }

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRateBasedRuleResponse
-type DeleteRateBasedRuleOutput struct {
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRulesResponse
+type ListRateBasedRulesOutput struct {
 _ struct{} `type:"structure"`

- // The ChangeToken that you used to submit the DeleteRateBasedRule request.
- // You can also use this value to query the status of the request. For more
- // information, see GetChangeTokenStatus.
- ChangeToken *string `min:"1" type:"string"`
+ // If you have more Rules than the number that you specified for Limit in the
+ // request, the response includes a NextMarker value. To list more Rules, submit
+ // another ListRateBasedRules request, and specify the NextMarker value from
+ // the response in the NextMarker value in the next request.
+ NextMarker *string `min:"1" type:"string"`
+
+ // An array of RuleSummary objects.
+ Rules []*RuleSummary `type:"list"`
 }

 // String returns the string representation
-func (s DeleteRateBasedRuleOutput) String() string {
+func (s ListRateBasedRulesOutput) String() string {
 return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s DeleteRateBasedRuleOutput) GoString() string {
+func (s ListRateBasedRulesOutput) GoString() string {
 return s.String()
 }

-// SetChangeToken sets the ChangeToken field's value.
-func (s *DeleteRateBasedRuleOutput) SetChangeToken(v string) *DeleteRateBasedRuleOutput {
- s.ChangeToken = &v
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListRateBasedRulesOutput) SetNextMarker(v string) *ListRateBasedRulesOutput {
+ s.NextMarker = &v
 return s
 }

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRuleRequest
-type DeleteRuleInput struct {
+// SetRules sets the Rules field's value.
+func (s *ListRateBasedRulesOutput) SetRules(v []*RuleSummary) *ListRateBasedRulesOutput {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexMatchSetsRequest
+type ListRegexMatchSetsInput struct {
 _ struct{} `type:"structure"`

- // The value returned by the most recent call to GetChangeToken.
- //
- // ChangeToken is a required field
- ChangeToken *string `min:"1" type:"string" required:"true"`
+ // Specifies the number of RegexMatchSet objects that you want AWS WAF to return
+ // for this request. If you have more RegexMatchSet objects than the number
+ // you specify for Limit, the response includes a NextMarker value that you
+ // can use to get another batch of RegexMatchSet objects.
+ Limit *int64 `type:"integer"`

- // The RuleId of the Rule that you want to delete. RuleId is returned by CreateRule
- // and by ListRules.
- //
- // RuleId is a required field
- RuleId *string `min:"1" type:"string" required:"true"`
+ // If you specify a value for Limit and you have more RegexMatchSet objects
+ // than the value of Limit, AWS WAF returns a NextMarker value in the response
+ // that allows you to list another group of RegexMatchSet objects. For the second and
+ // subsequent ListRegexMatchSets requests, specify the value of NextMarker from
+ // the previous response to get information about another batch of RegexMatchSet
+ // objects.
+ NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteRuleInput) String() string { +func (s ListRegexMatchSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRuleInput) GoString() string { +func (s ListRegexMatchSetsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRegexMatchSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRegexMatchSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7374,84 +12293,89 @@ func (s *DeleteRuleInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteRuleInput) SetChangeToken(v string) *DeleteRuleInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListRegexMatchSetsInput) SetLimit(v int64) *ListRegexMatchSetsInput { + s.Limit = &v return s } -// SetRuleId sets the RuleId field's value. -func (s *DeleteRuleInput) SetRuleId(v string) *DeleteRuleInput { - s.RuleId = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListRegexMatchSetsInput) SetNextMarker(v string) *ListRegexMatchSetsInput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteRuleResponse -type DeleteRuleOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexMatchSetsResponse +type ListRegexMatchSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteRule request. You can also - // use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // If you have more RegexMatchSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more RegexMatchSet objects, submit another ListRegexMatchSets request, and + // specify the NextMarker value from the response in the NextMarker value in + // the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of RegexMatchSetSummary objects. + RegexMatchSets []*RegexMatchSetSummary `type:"list"` } // String returns the string representation -func (s DeleteRuleOutput) String() string { +func (s ListRegexMatchSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRuleOutput) GoString() string { +func (s ListRegexMatchSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. 
-func (s *DeleteRuleOutput) SetChangeToken(v string) *DeleteRuleOutput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListRegexMatchSetsOutput) SetNextMarker(v string) *ListRegexMatchSetsOutput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSetRequest -type DeleteSizeConstraintSetInput struct { +// SetRegexMatchSets sets the RegexMatchSets field's value. +func (s *ListRegexMatchSetsOutput) SetRegexMatchSets(v []*RegexMatchSetSummary) *ListRegexMatchSetsOutput { + s.RegexMatchSets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexPatternSetsRequest +type ListRegexPatternSetsInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of RegexPatternSet objects that you want AWS WAF to + // return for this request. If you have more RegexPatternSet objects than the + // number you specify for Limit, the response includes a NextMarker value that + // you can use to get another batch of RegexPatternSet objects. + Limit *int64 `type:"integer"` - // The SizeConstraintSetId of the SizeConstraintSet that you want to delete. - // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. - // - // SizeConstraintSetId is a required field - SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more RegexPatternSet objects + // than the value of Limit, AWS WAF returns a NextMarker value in the response + // that allows you to list another group of RegexPatternSet objects. For the + // second and subsequent ListRegexPatternSets requests, specify the value of + // NextMarker from the previous response to get information about another batch + // of RegexPatternSet objects. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteSizeConstraintSetInput) String() string { +func (s ListRegexPatternSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSizeConstraintSetInput) GoString() string { +func (s ListRegexPatternSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteSizeConstraintSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSizeConstraintSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.SizeConstraintSetId == nil { - invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) - } - if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) +func (s *ListRegexPatternSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRegexPatternSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7460,85 +12384,87 @@ func (s *DeleteSizeConstraintSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteSizeConstraintSetInput) SetChangeToken(v string) *DeleteSizeConstraintSetInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListRegexPatternSetsInput) SetLimit(v int64) *ListRegexPatternSetsInput { + s.Limit = &v return s } -// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. -func (s *DeleteSizeConstraintSetInput) SetSizeConstraintSetId(v string) *DeleteSizeConstraintSetInput { - s.SizeConstraintSetId = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListRegexPatternSetsInput) SetNextMarker(v string) *ListRegexPatternSetsInput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSizeConstraintSetResponse -type DeleteSizeConstraintSetOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRegexPatternSetsResponse +type ListRegexPatternSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteSizeConstraintSet request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // If you have more RegexPatternSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more RegexPatternSet objects, submit another ListRegexPatternSets request, + // and specify the NextMarker value from the response in the NextMarker value + // in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of RegexPatternSetSummary objects. + RegexPatternSets []*RegexPatternSetSummary `type:"list"` } // String returns the string representation -func (s DeleteSizeConstraintSetOutput) String() string { +func (s ListRegexPatternSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSizeConstraintSetOutput) GoString() string { +func (s ListRegexPatternSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteSizeConstraintSetOutput) SetChangeToken(v string) *DeleteSizeConstraintSetOutput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. 
+func (s *ListRegexPatternSetsOutput) SetNextMarker(v string) *ListRegexPatternSetsOutput { + s.NextMarker = &v return s } -// A request to delete a SqlInjectionMatchSet from AWS WAF. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSetRequest -type DeleteSqlInjectionMatchSetInput struct { +// SetRegexPatternSets sets the RegexPatternSets field's value. +func (s *ListRegexPatternSetsOutput) SetRegexPatternSets(v []*RegexPatternSetSummary) *ListRegexPatternSetsOutput { + s.RegexPatternSets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRulesRequest +type ListRulesInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of Rules that you want AWS WAF to return for this request. + // If you have more Rules than the number that you specify for Limit, the response + // includes a NextMarker value that you can use to get another batch of Rules. + Limit *int64 `type:"integer"` - // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to delete. - // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. - // - // SqlInjectionMatchSetId is a required field - SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more Rules than the value of + // Limit, AWS WAF returns a NextMarker value in the response that allows you + // to list another group of Rules. For the second and subsequent ListRules requests, + // specify the value of NextMarker from the previous response to get information + // about another batch of Rules. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteSqlInjectionMatchSetInput) String() string { +func (s ListRulesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSqlInjectionMatchSetInput) GoString() string { +func (s ListRulesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSqlInjectionMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSqlInjectionMatchSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.SqlInjectionMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) - } - if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) +func (s *ListRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRulesInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7547,85 +12473,87 @@ func (s *DeleteSqlInjectionMatchSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteSqlInjectionMatchSetInput) SetChangeToken(v string) *DeleteSqlInjectionMatchSetInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. 
+func (s *ListRulesInput) SetLimit(v int64) *ListRulesInput { + s.Limit = &v return s } -// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. -func (s *DeleteSqlInjectionMatchSetInput) SetSqlInjectionMatchSetId(v string) *DeleteSqlInjectionMatchSetInput { - s.SqlInjectionMatchSetId = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListRulesInput) SetNextMarker(v string) *ListRulesInput { + s.NextMarker = &v return s } -// The response to a request to delete a SqlInjectionMatchSet from AWS WAF. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteSqlInjectionMatchSetResponse -type DeleteSqlInjectionMatchSetOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRulesResponse +type ListRulesOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteSqlInjectionMatchSet request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // If you have more Rules than the number that you specified for Limit in the + // request, the response includes a NextMarker value. To list more Rules, submit + // another ListRules request, and specify the NextMarker value from the response + // in the NextMarker value in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of RuleSummary objects. + Rules []*RuleSummary `type:"list"` } // String returns the string representation -func (s DeleteSqlInjectionMatchSetOutput) String() string { +func (s ListRulesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSqlInjectionMatchSetOutput) GoString() string { +func (s ListRulesOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteSqlInjectionMatchSetOutput) SetChangeToken(v string) *DeleteSqlInjectionMatchSetOutput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListRulesOutput) SetNextMarker(v string) *ListRulesOutput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACLRequest -type DeleteWebACLInput struct { +// SetRules sets the Rules field's value. +func (s *ListRulesOutput) SetRules(v []*RuleSummary) *ListRulesOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSetsRequest +type ListSizeConstraintSetsInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of SizeConstraintSet objects that you want AWS WAF to + // return for this request. If you have more SizeConstraintSets objects than + // the number you specify for Limit, the response includes a NextMarker value + // that you can use to get another batch of SizeConstraintSet objects. + Limit *int64 `type:"integer"` - // The WebACLId of the WebACL that you want to delete. WebACLId is returned - // by CreateWebACL and by ListWebACLs. 
- // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more SizeConstraintSets than + // the value of Limit, AWS WAF returns a NextMarker value in the response that + // allows you to list another group of SizeConstraintSets. For the second and + // subsequent ListSizeConstraintSets requests, specify the value of NextMarker + // from the previous response to get information about another batch of SizeConstraintSets. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteWebACLInput) String() string { +func (s ListSizeConstraintSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteWebACLInput) GoString() string { +func (s ListSizeConstraintSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteWebACLInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.WebACLId == nil { - invalidParams.Add(request.NewErrParamRequired("WebACLId")) - } - if s.WebACLId != nil && len(*s.WebACLId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) +func (s *ListSizeConstraintSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSizeConstraintSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7634,85 +12562,90 @@ func (s *DeleteWebACLInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteWebACLInput) SetChangeToken(v string) *DeleteWebACLInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListSizeConstraintSetsInput) SetLimit(v int64) *ListSizeConstraintSetsInput { + s.Limit = &v return s } -// SetWebACLId sets the WebACLId field's value. -func (s *DeleteWebACLInput) SetWebACLId(v string) *DeleteWebACLInput { - s.WebACLId = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListSizeConstraintSetsInput) SetNextMarker(v string) *ListSizeConstraintSetsInput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteWebACLResponse -type DeleteWebACLOutput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSetsResponse +type ListSizeConstraintSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteWebACL request. You can - // also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // If you have more SizeConstraintSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more SizeConstraintSet objects, submit another ListSizeConstraintSets request, + // and specify the NextMarker value from the response in the NextMarker value + // in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of SizeConstraintSetSummary objects. 
+ SizeConstraintSets []*SizeConstraintSetSummary `type:"list"` } // String returns the string representation -func (s DeleteWebACLOutput) String() string { +func (s ListSizeConstraintSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteWebACLOutput) GoString() string { +func (s ListSizeConstraintSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteWebACLOutput) SetChangeToken(v string) *DeleteWebACLOutput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListSizeConstraintSetsOutput) SetNextMarker(v string) *ListSizeConstraintSetsOutput { + s.NextMarker = &v return s } -// A request to delete an XssMatchSet from AWS WAF. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSetRequest -type DeleteXssMatchSetInput struct { +// SetSizeConstraintSets sets the SizeConstraintSets field's value. +func (s *ListSizeConstraintSetsOutput) SetSizeConstraintSets(v []*SizeConstraintSetSummary) *ListSizeConstraintSetsOutput { + s.SizeConstraintSets = v + return s +} + +// A request to list the SqlInjectionMatchSet objects created by the current +// AWS account. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSetsRequest +type ListSqlInjectionMatchSetsInput struct { _ struct{} `type:"structure"` - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF + // to return for this request. If you have more SqlInjectionMatchSet objects + // than the number you specify for Limit, the response includes a NextMarker + // value that you can use to get another batch of SqlInjectionMatchSet objects. + Limit *int64 `type:"integer"` - // The XssMatchSetId of the XssMatchSet that you want to delete. XssMatchSetId - // is returned by CreateXssMatchSet and by ListXssMatchSets. - // - // XssMatchSetId is a required field - XssMatchSetId *string `min:"1" type:"string" required:"true"` + // If you specify a value for Limit and you have more SqlInjectionMatchSet objects + // than the value of Limit, AWS WAF returns a NextMarker value in the response + // that allows you to list another group of SqlInjectionMatchSets. For the second + // and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker + // from the previous response to get information about another batch of SqlInjectionMatchSets. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteXssMatchSetInput) String() string { +func (s ListSqlInjectionMatchSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteXssMatchSetInput) GoString() string { +func (s ListSqlInjectionMatchSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteXssMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteXssMatchSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.XssMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) - } - if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) +func (s *ListSqlInjectionMatchSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSqlInjectionMatchSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7721,101 +12654,90 @@ func (s *DeleteXssMatchSetInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteXssMatchSetInput) SetChangeToken(v string) *DeleteXssMatchSetInput { - s.ChangeToken = &v +// SetLimit sets the Limit field's value. +func (s *ListSqlInjectionMatchSetsInput) SetLimit(v int64) *ListSqlInjectionMatchSetsInput { + s.Limit = &v return s } -// SetXssMatchSetId sets the XssMatchSetId field's value. -func (s *DeleteXssMatchSetInput) SetXssMatchSetId(v string) *DeleteXssMatchSetInput { - s.XssMatchSetId = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListSqlInjectionMatchSetsInput) SetNextMarker(v string) *ListSqlInjectionMatchSetsInput { + s.NextMarker = &v return s } -// The response to a request to delete an XssMatchSet from AWS WAF. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/DeleteXssMatchSetResponse -type DeleteXssMatchSetOutput struct { +// The response to a ListSqlInjectionMatchSets request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSetsResponse +type ListSqlInjectionMatchSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used to submit the DeleteXssMatchSet request. You - // can also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` + // If you have more SqlInjectionMatchSet objects than the number that you specified + // for Limit in the request, the response includes a NextMarker value. To list + // more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets + // request, and specify the NextMarker value from the response in the NextMarker + // value in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of SqlInjectionMatchSetSummary objects. + SqlInjectionMatchSets []*SqlInjectionMatchSetSummary `type:"list"` } // String returns the string representation -func (s DeleteXssMatchSetOutput) String() string { +func (s ListSqlInjectionMatchSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteXssMatchSetOutput) GoString() string { +func (s ListSqlInjectionMatchSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *DeleteXssMatchSetOutput) SetChangeToken(v string) *DeleteXssMatchSetOutput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. 
+func (s *ListSqlInjectionMatchSetsOutput) SetNextMarker(v string) *ListSqlInjectionMatchSetsOutput { + s.NextMarker = &v return s } -// Specifies where in a web request to look for TargetString. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/FieldToMatch -type FieldToMatch struct { +// SetSqlInjectionMatchSets sets the SqlInjectionMatchSets field's value. +func (s *ListSqlInjectionMatchSetsOutput) SetSqlInjectionMatchSets(v []*SqlInjectionMatchSetSummary) *ListSqlInjectionMatchSetsOutput { + s.SqlInjectionMatchSets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLsRequest +type ListWebACLsInput struct { _ struct{} `type:"structure"` - // When the value of Type is HEADER, enter the name of the header that you want - // AWS WAF to search, for example, User-Agent or Referer. If the value of Type - // is any other value, omit Data. - // - // The name of the header is not case sensitive. - Data *string `type:"string"` + // Specifies the number of WebACL objects that you want AWS WAF to return for + // this request. If you have more WebACL objects than the number that you specify + // for Limit, the response includes a NextMarker value that you can use to get + // another batch of WebACL objects. + Limit *int64 `type:"integer"` - // The part of the web request that you want AWS WAF to search for a specified - // string. Parts of a request that you can search include the following: - // - // * HEADER: A specified request header, for example, the value of the User-Agent - // or Referer header. If you choose HEADER for the type, specify the name - // of the header in Data. - // - // * METHOD: The HTTP method, which indicated the type of operation that - // the request is asking the origin to perform. Amazon CloudFront supports - // the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT. - // - // * QUERY_STRING: A query string, which is the part of a URL that appears - // after a ? character, if any. - // - // * URI: The part of a web request that identifies a resource, for example, - // /images/daily-ad.jpg. - // - // * BODY: The part of a request that contains any additional data that you - // want to send to your web server as the HTTP request body, such as data - // from a form. The request body immediately follows the request headers. - // Note that only the first 8192 bytes of the request body are forwarded - // to AWS WAF for inspection. To allow or block requests based on the length - // of the body, you can create a size constraint set. For more information, - // see CreateSizeConstraintSet. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"MatchFieldType"` + // If you specify a value for Limit and you have more WebACL objects than the + // number that you specify for Limit, AWS WAF returns a NextMarker value in + // the response that allows you to list another group of WebACL objects. For + // the second and subsequent ListWebACLs requests, specify the value of NextMarker + // from the previous response to get information about another batch of WebACL + // objects. 
+ NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s FieldToMatch) String() string { +func (s ListWebACLsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldToMatch) GoString() string { +func (s ListWebACLsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *FieldToMatch) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FieldToMatch"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) +func (s *ListWebACLsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWebACLsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) } if invalidParams.Len() > 0 { @@ -7824,163 +12746,205 @@ func (s *FieldToMatch) Validate() error { return nil } -// SetData sets the Data field's value. -func (s *FieldToMatch) SetData(v string) *FieldToMatch { - s.Data = &v +// SetLimit sets the Limit field's value. +func (s *ListWebACLsInput) SetLimit(v int64) *ListWebACLsInput { + s.Limit = &v return s } -// SetType sets the Type field's value. -func (s *FieldToMatch) SetType(v string) *FieldToMatch { - s.Type = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListWebACLsInput) SetNextMarker(v string) *ListWebACLsInput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSetRequest -type GetByteMatchSetInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLsResponse +type ListWebACLsOutput struct { _ struct{} `type:"structure"` - // The ByteMatchSetId of the ByteMatchSet that you want to get. ByteMatchSetId - // is returned by CreateByteMatchSet and by ListByteMatchSets. - // - // ByteMatchSetId is a required field - ByteMatchSetId *string `min:"1" type:"string" required:"true"` + // If you have more WebACL objects than the number that you specified for Limit + // in the request, the response includes a NextMarker value. To list more WebACL + // objects, submit another ListWebACLs request, and specify the NextMarker value + // from the response in the NextMarker value in the next request. + NextMarker *string `min:"1" type:"string"` + + // An array of WebACLSummary objects. + WebACLs []*WebACLSummary `type:"list"` } // String returns the string representation -func (s GetByteMatchSetInput) String() string { +func (s ListWebACLsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetByteMatchSetInput) GoString() string { +func (s ListWebACLsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetByteMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetByteMatchSetInput"} - if s.ByteMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) - } - if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextMarker sets the NextMarker field's value. 
+func (s *ListWebACLsOutput) SetNextMarker(v string) *ListWebACLsOutput { + s.NextMarker = &v + return s } -// SetByteMatchSetId sets the ByteMatchSetId field's value. -func (s *GetByteMatchSetInput) SetByteMatchSetId(v string) *GetByteMatchSetInput { - s.ByteMatchSetId = &v +// SetWebACLs sets the WebACLs field's value. +func (s *ListWebACLsOutput) SetWebACLs(v []*WebACLSummary) *ListWebACLsOutput { + s.WebACLs = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetByteMatchSetResponse -type GetByteMatchSetOutput struct { +// A request to list the XssMatchSet objects created by the current AWS account. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSetsRequest +type ListXssMatchSetsInput struct { _ struct{} `type:"structure"` - // Information about the ByteMatchSet that you specified in the GetByteMatchSet - // request. For more information, see the following topics: - // - // * ByteMatchSet: Contains ByteMatchSetId, ByteMatchTuples, and Name - // - // * ByteMatchTuples: Contains an array of ByteMatchTuple objects. Each ByteMatchTuple - // object contains FieldToMatch, PositionalConstraint, TargetString, and - // TextTransformation - // - // * FieldToMatch: Contains Data and Type - ByteMatchSet *ByteMatchSet `type:"structure"` + // Specifies the number of XssMatchSet objects that you want AWS WAF to return + // for this request. If you have more XssMatchSet objects than the number you + // specify for Limit, the response includes a NextMarker value that you can + // use to get another batch of XssMatchSet objects. + Limit *int64 `type:"integer"` + + // If you specify a value for Limit and you have more XssMatchSet objects than + // the value of Limit, AWS WAF returns a NextMarker value in the response that + // allows you to list another group of XssMatchSets. For the second and subsequent + // ListXssMatchSets requests, specify the value of NextMarker from the previous + // response to get information about another batch of XssMatchSets. + NextMarker *string `min:"1" type:"string"` } // String returns the string representation -func (s GetByteMatchSetOutput) String() string { +func (s ListXssMatchSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetByteMatchSetOutput) GoString() string { +func (s ListXssMatchSetsInput) GoString() string { return s.String() } -// SetByteMatchSet sets the ByteMatchSet field's value. -func (s *GetByteMatchSetOutput) SetByteMatchSet(v *ByteMatchSet) *GetByteMatchSetOutput { - s.ByteMatchSet = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListXssMatchSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListXssMatchSetsInput"} + if s.NextMarker != nil && len(*s.NextMarker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) + } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenRequest -type GetChangeTokenInput struct { - _ struct{} `type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// String returns the string representation -func (s GetChangeTokenInput) String() string { - return awsutil.Prettify(s) +// SetLimit sets the Limit field's value.
+func (s *ListXssMatchSetsInput) SetLimit(v int64) *ListXssMatchSetsInput { + s.Limit = &v + return s } -// GoString returns the string representation -func (s GetChangeTokenInput) GoString() string { - return s.String() +// SetNextMarker sets the NextMarker field's value. +func (s *ListXssMatchSetsInput) SetNextMarker(v string) *ListXssMatchSetsInput { + s.NextMarker = &v + return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenResponse -type GetChangeTokenOutput struct { +// The response to a ListXssMatchSets request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSetsResponse +type ListXssMatchSetsOutput struct { _ struct{} `type:"structure"` - // The ChangeToken that you used in the request. Use this value in a GetChangeTokenStatus - // request to get the current status of the request. - ChangeToken *string `min:"1" type:"string"` + // If you have more XssMatchSet objects than the number that you specified for + // Limit in the request, the response includes a NextMarker value. To list more + // XssMatchSet objects, submit another ListXssMatchSets request, and specify + // the NextMarker value from the response in the NextMarker value in the next + // request. + NextMarker *string `min:"1" type:"string"` + + // An array of XssMatchSetSummary objects. + XssMatchSets []*XssMatchSetSummary `type:"list"` } // String returns the string representation -func (s GetChangeTokenOutput) String() string { +func (s ListXssMatchSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetChangeTokenOutput) GoString() string { +func (s ListXssMatchSetsOutput) GoString() string { return s.String() } -// SetChangeToken sets the ChangeToken field's value. -func (s *GetChangeTokenOutput) SetChangeToken(v string) *GetChangeTokenOutput { - s.ChangeToken = &v +// SetNextMarker sets the NextMarker field's value. +func (s *ListXssMatchSetsOutput) SetNextMarker(v string) *ListXssMatchSetsOutput { + s.NextMarker = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatusRequest -type GetChangeTokenStatusInput struct { +// SetXssMatchSets sets the XssMatchSets field's value. +func (s *ListXssMatchSetsOutput) SetXssMatchSets(v []*XssMatchSetSummary) *ListXssMatchSetsOutput { + s.XssMatchSets = v + return s +} + +// Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, RegexMatchSet, +// GeoMatchSet, and SizeConstraintSet objects that you want to add to a Rule +// and, for each object, indicates whether you want to negate the settings, +// for example, requests that do NOT originate from the IP address 192.0.2.44. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/Predicate +type Predicate struct { _ struct{} `type:"structure"` - // The change token for which you want to get the status. This change token - // was previously returned in the GetChangeToken response. + // A unique identifier for a predicate in a Rule, such as ByteMatchSetId or + // IPSetId. The ID is returned by the corresponding Create or List command. 
// - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // DataId is a required field + DataId *string `min:"1" type:"string" required:"true"` + + // Set Negated to False if you want AWS WAF to allow, block, or count requests + // based on the settings in the specified ByteMatchSet, IPSet, SqlInjectionMatchSet, + // XssMatchSet, RegexMatchSet, GeoMatchSet, or SizeConstraintSet. For example, + // if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow or block + // requests based on that IP address. + // + // Set Negated to True if you want AWS WAF to allow or block a request based + // on the negation of the settings in the ByteMatchSet, IPSet, SqlInjectionMatchSet, + // XssMatchSet, RegexMatchSet, GeoMatchSet, or SizeConstraintSet. For example, + // if an IPSet includes the IP address 192.0.2.44, AWS WAF will allow, block, + // or count requests based on all IP addresses except 192.0.2.44. + // + // Negated is a required field + Negated *bool `type:"boolean" required:"true"` + + // The type of predicate in a Rule, such as ByteMatchSet or IPSet. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"PredicateType"` } // String returns the string representation -func (s GetChangeTokenStatusInput) String() string { +func (s Predicate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetChangeTokenStatusInput) GoString() string { +func (s Predicate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetChangeTokenStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetChangeTokenStatusInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) +func (s *Predicate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Predicate"} + if s.DataId == nil { + invalidParams.Add(request.NewErrParamRequired("DataId")) } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + if s.DataId != nil && len(*s.DataId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataId", 1)) + } + if s.Negated == nil { + invalidParams.Add(request.NewErrParamRequired("Negated")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -7989,187 +12953,287 @@ func (s *GetChangeTokenStatusInput) Validate() error { return nil } -// SetChangeToken sets the ChangeToken field's value. -func (s *GetChangeTokenStatusInput) SetChangeToken(v string) *GetChangeTokenStatusInput { - s.ChangeToken = &v +// SetDataId sets the DataId field's value. +func (s *Predicate) SetDataId(v string) *Predicate { + s.DataId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetChangeTokenStatusResponse -type GetChangeTokenStatusOutput struct { +// SetNegated sets the Negated field's value. +func (s *Predicate) SetNegated(v bool) *Predicate { + s.Negated = &v + return s +} + +// SetType sets the Type field's value. +func (s *Predicate) SetType(v string) *Predicate { + s.Type = &v + return s +} + +// A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule +// counts the number of requests that arrive from a specified IP address every +// five minutes.
For example, based on recent requests that you've seen from +// an attacker, you might create a RateBasedRule that includes the following +// conditions: +// +// * The requests come from 192.0.2.44. +// +// * They contain the value BadBot in the User-Agent header. +// +// In the rule, you also define the rate limit as 15,000. +// +// Requests that meet both of these conditions and exceed 15,000 requests every +// five minutes trigger the rule's action (block or count), which is defined +// in the web ACL. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RateBasedRule +type RateBasedRule struct { _ struct{} `type:"structure"` - // The status of the change token. - ChangeTokenStatus *string `type:"string" enum:"ChangeTokenStatus"` + // The Predicates object contains one Predicate element for each ByteMatchSet, + // IPSet, or SqlInjectionMatchSet object that you want to include in a RateBasedRule. + // + // MatchPredicates is a required field + MatchPredicates []*Predicate `type:"list" required:"true"` + + // A friendly name or description for the metrics for a RateBasedRule. The name + // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't + // contain whitespace. You can't change the name of the metric after you create + // the RateBasedRule. + MetricName *string `type:"string"` + + // A friendly name or description for a RateBasedRule. You can't change the + // name of a RateBasedRule after you create it. + Name *string `min:"1" type:"string"` + + // The field that AWS WAF uses to determine if requests are likely arriving + // from a single source and thus subject to rate monitoring. The only valid value + // for RateKey is IP. IP indicates that requests arriving from the same IP address + // are subject to the RateLimit that is specified in the RateBasedRule. + // + // RateKey is a required field + RateKey *string `type:"string" required:"true" enum:"RateKey"` + + // The maximum number of requests, which have an identical value in the field + // specified by the RateKey, allowed in a five-minute period. If the number + // of requests exceeds the RateLimit and the other predicates specified in the + // rule are also met, AWS WAF triggers the action that is specified for this + // rule. + // + // RateLimit is a required field + RateLimit *int64 `min:"2000" type:"long" required:"true"` + + // A unique identifier for a RateBasedRule. You use RuleId to get more information + // about a RateBasedRule (see GetRateBasedRule), update a RateBasedRule (see + // UpdateRateBasedRule), insert a RateBasedRule into a WebACL or delete one + // from a WebACL (see UpdateWebACL), or delete a RateBasedRule from AWS WAF + // (see DeleteRateBasedRule). + // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetChangeTokenStatusOutput) String() string { +func (s RateBasedRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetChangeTokenStatusOutput) GoString() string { +func (s RateBasedRule) GoString() string { return s.String() } -// SetChangeTokenStatus sets the ChangeTokenStatus field's value. -func (s *GetChangeTokenStatusOutput) SetChangeTokenStatus(v string) *GetChangeTokenStatusOutput { - s.ChangeTokenStatus = &v +// SetMatchPredicates sets the MatchPredicates field's value.
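// A short sketch of how Predicate and RateBasedRule fit together, following
// the documentation above: the rule is created with a RateKey and RateLimit,
// and a predicate is then inserted with UpdateRateBasedRule. The IPSet ID is
// a placeholder, enum values are written as string literals, and each update
// call fetches a fresh change token, assuming standard aws-sdk-go setup.
//
//	package main
//
//	import (
//		"log"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/waf"
//	)
//
//	func main() {
//		svc := waf.New(session.Must(session.NewSession()))
//		ipSetID := aws.String("example-ipset-id") // placeholder IPSetId
//
//		token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		rule, err := svc.CreateRateBasedRule(&waf.CreateRateBasedRuleInput{
//			ChangeToken: token.ChangeToken,
//			Name:        aws.String("TooManyBadBots"),
//			MetricName:  aws.String("TooManyBadBots"),
//			RateKey:     aws.String("IP"),  // the only valid RateKey value
//			RateLimit:   aws.Int64(15000),  // must be at least 2000
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//
//		token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		// Negated=false: count requests that DO originate from the IPSet.
//		_, err = svc.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
//			ChangeToken: token.ChangeToken,
//			RuleId:      rule.Rule.RuleId,
//			RateLimit:   aws.Int64(15000),
//			Updates: []*waf.RuleUpdate{{
//				Action: aws.String("INSERT"), // ChangeAction enum value
//				Predicate: &waf.Predicate{
//					DataId:  ipSetID,
//					Negated: aws.Bool(false),
//					Type:    aws.String("IPMatch"), // PredicateType enum value
//				},
//			}},
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//	}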
+func (s *RateBasedRule) SetMatchPredicates(v []*Predicate) *RateBasedRule { + s.MatchPredicates = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSetRequest -type GetIPSetInput struct { - _ struct{} `type:"structure"` - - // The IPSetId of the IPSet that you want to get. IPSetId is returned by CreateIPSet - // and by ListIPSets. - // - // IPSetId is a required field - IPSetId *string `min:"1" type:"string" required:"true"` +// SetMetricName sets the MetricName field's value. +func (s *RateBasedRule) SetMetricName(v string) *RateBasedRule { + s.MetricName = &v + return s } -// String returns the string representation -func (s GetIPSetInput) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *RateBasedRule) SetName(v string) *RateBasedRule { + s.Name = &v + return s } -// GoString returns the string representation -func (s GetIPSetInput) GoString() string { - return s.String() +// SetRateKey sets the RateKey field's value. +func (s *RateBasedRule) SetRateKey(v string) *RateBasedRule { + s.RateKey = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetIPSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetIPSetInput"} - if s.IPSetId == nil { - invalidParams.Add(request.NewErrParamRequired("IPSetId")) - } - if s.IPSetId != nil && len(*s.IPSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetRateLimit sets the RateLimit field's value. +func (s *RateBasedRule) SetRateLimit(v int64) *RateBasedRule { + s.RateLimit = &v + return s } -// SetIPSetId sets the IPSetId field's value. -func (s *GetIPSetInput) SetIPSetId(v string) *GetIPSetInput { - s.IPSetId = &v +// SetRuleId sets the RuleId field's value. +func (s *RateBasedRule) SetRuleId(v string) *RateBasedRule { + s.RuleId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetIPSetResponse -type GetIPSetOutput struct { +// In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains +// the RegexMatchSetId and Name of a RegexMatchSet, and the values that you +// specified when you updated the RegexMatchSet. +// +// The values are contained in a RegexMatchTuple object, which specifies the parts +// of web requests that you want AWS WAF to inspect and the values that you +// want AWS WAF to search for. If a RegexMatchSet contains more than one RegexMatchTuple +// object, a request needs to match the settings in only one RegexMatchTuple +// to be considered a match. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexMatchSet +type RegexMatchSet struct { _ struct{} `type:"structure"` - // Information about the IPSet that you specified in the GetIPSet request. For - // more information, see the following topics: + // A friendly name or description of the RegexMatchSet. You can't change Name + // after you create a RegexMatchSet. + Name *string `min:"1" type:"string"` + + // The RegexMatchSetId for a RegexMatchSet. You use RegexMatchSetId to get information + // about a RegexMatchSet (see GetRegexMatchSet), update a RegexMatchSet (see + // UpdateRegexMatchSet), insert a RegexMatchSet into a Rule or delete one from + // a Rule (see UpdateRule), and delete a RegexMatchSet from AWS WAF (see DeleteRegexMatchSet).
// - // * IPSet: Contains IPSetDescriptors, IPSetId, and Name + // RegexMatchSetId is returned by CreateRegexMatchSet and by ListRegexMatchSets. + RegexMatchSetId *string `min:"1" type:"string"` + + // Contains an array of RegexMatchTuple objects. Each RegexMatchTuple object + // contains: // - // * IPSetDescriptors: Contains an array of IPSetDescriptor objects. Each - // IPSetDescriptor object contains Type and Value - IPSet *IPSet `type:"structure"` + // * The part of a web request that you want AWS WAF to inspect, such as + // a query string or the value of the User-Agent header. + // + // * The identifier of the pattern (a regular expression) that you want AWS + // WAF to look for. For more information, see RegexPatternSet. + // + // * Whether to perform any conversions on the request, such as converting + // it to lowercase, before inspecting it for the specified string. + RegexMatchTuples []*RegexMatchTuple `type:"list"` } // String returns the string representation -func (s GetIPSetOutput) String() string { +func (s RegexMatchSet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetIPSetOutput) GoString() string { +func (s RegexMatchSet) GoString() string { return s.String() } -// SetIPSet sets the IPSet field's value. -func (s *GetIPSetOutput) SetIPSet(v *IPSet) *GetIPSetOutput { - s.IPSet = v +// SetName sets the Name field's value. +func (s *RegexMatchSet) SetName(v string) *RegexMatchSet { + s.Name = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleRequest -type GetRateBasedRuleInput struct { +// SetRegexMatchSetId sets the RegexMatchSetId field's value. +func (s *RegexMatchSet) SetRegexMatchSetId(v string) *RegexMatchSet { + s.RegexMatchSetId = &v + return s +} + +// SetRegexMatchTuples sets the RegexMatchTuples field's value. +func (s *RegexMatchSet) SetRegexMatchTuples(v []*RegexMatchTuple) *RegexMatchSet { + s.RegexMatchTuples = v + return s +} + +// Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes +// the Name and RegexMatchSetId for one RegexMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexMatchSetSummary +type RegexMatchSetSummary struct { _ struct{} `type:"structure"` - // The RuleId of the RateBasedRule that you want to get. RuleId is returned - // by CreateRateBasedRule and by ListRateBasedRules. + // A friendly name or description of the RegexMatchSet. You can't change Name + // after you create a RegexMatchSet. // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The RegexMatchSetId for a RegexMatchSet. You use RegexMatchSetId to get information + // about a RegexMatchSet, update a RegexMatchSet, remove a RegexMatchSet from + // a Rule, and delete a RegexMatchSet from AWS WAF. + // + // RegexMatchSetId is returned by CreateRegexMatchSet and by ListRegexMatchSets. 
+ // + // RegexMatchSetId is a required field + RegexMatchSetId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetRateBasedRuleInput) String() string { +func (s RegexMatchSetSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRateBasedRuleInput) GoString() string { +func (s RegexMatchSetSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetRateBasedRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRateBasedRuleInput"} - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetName sets the Name field's value. +func (s *RegexMatchSetSummary) SetName(v string) *RegexMatchSetSummary { + s.Name = &v + return s } -// SetRuleId sets the RuleId field's value. -func (s *GetRateBasedRuleInput) SetRuleId(v string) *GetRateBasedRuleInput { - s.RuleId = &v +// SetRegexMatchSetId sets the RegexMatchSetId field's value. +func (s *RegexMatchSetSummary) SetRegexMatchSetId(v string) *RegexMatchSetSummary { + s.RegexMatchSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeysRequest -type GetRateBasedRuleManagedKeysInput struct { +// In an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether +// to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexMatchSetUpdate +type RegexMatchSetUpdate struct { _ struct{} `type:"structure"` - // A null value and not currently used. Do not include this in your request. - NextMarker *string `min:"1" type:"string"` + // Specifies whether to insert or delete a RegexMatchTuple. + // + // Action is a required field + Action *string `type:"string" required:"true" enum:"ChangeAction"` - // The RuleId of the RateBasedRule for which you want to get a list of ManagedKeys. - // RuleId is returned by CreateRateBasedRule and by ListRateBasedRules. + // Information about the part of a web request that you want AWS WAF to inspect + // and the identifier of the regular expression (regex) pattern that you want + // AWS WAF to search for. If you specify DELETE for the value of Action, the + // RegexMatchTuple values must exactly match the values in the RegexMatchTuple + // that you want to delete from the RegexMatchSet. // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // RegexMatchTuple is a required field + RegexMatchTuple *RegexMatchTuple `type:"structure" required:"true"` } // String returns the string representation -func (s GetRateBasedRuleManagedKeysInput) String() string { +func (s RegexMatchSetUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRateBasedRuleManagedKeysInput) GoString() string { +func (s RegexMatchSetUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetRateBasedRuleManagedKeysInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRateBasedRuleManagedKeysInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *RegexMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegexMatchSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) + if s.RegexMatchTuple == nil { + invalidParams.Add(request.NewErrParamRequired("RegexMatchTuple")) } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + if s.RegexMatchTuple != nil { + if err := s.RegexMatchTuple.Validate(); err != nil { + invalidParams.AddNested("RegexMatchTuple", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8178,225 +13242,320 @@ func (s *GetRateBasedRuleManagedKeysInput) Validate() error { return nil } -// SetNextMarker sets the NextMarker field's value. -func (s *GetRateBasedRuleManagedKeysInput) SetNextMarker(v string) *GetRateBasedRuleManagedKeysInput { - s.NextMarker = &v +// SetAction sets the Action field's value. +func (s *RegexMatchSetUpdate) SetAction(v string) *RegexMatchSetUpdate { + s.Action = &v return s } -// SetRuleId sets the RuleId field's value. -func (s *GetRateBasedRuleManagedKeysInput) SetRuleId(v string) *GetRateBasedRuleManagedKeysInput { - s.RuleId = &v +// SetRegexMatchTuple sets the RegexMatchTuple field's value. +func (s *RegexMatchSetUpdate) SetRegexMatchTuple(v *RegexMatchTuple) *RegexMatchSetUpdate { + s.RegexMatchTuple = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleManagedKeysResponse -type GetRateBasedRuleManagedKeysOutput struct { +// The regular expression pattern that you want AWS WAF to search for in web +// requests, the location in requests that you want AWS WAF to search, and other +// settings. Each RegexMatchTuple object contains: +// +// * The part of a web request that you want AWS WAF to inspect, such as +// a query string or the value of the User-Agent header. +// +// * The identifier of the pattern (a regular expression) that you want AWS +// WAF to look for. For more information, see RegexPatternSet. +// +// * Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexMatchTuple +type RegexMatchTuple struct { _ struct{} `type:"structure"` - // An array of IP addresses that currently are blocked by the specified RateBasedRule. - ManagedKeys []*string `type:"list"` + // Specifies where in a web request to look for the RegexPatternSet. + // + // FieldToMatch is a required field + FieldToMatch *FieldToMatch `type:"structure" required:"true"` - // A null value and not currently used. - NextMarker *string `min:"1" type:"string"` + // The RegexPatternSetId for a RegexPatternSet. You use RegexPatternSetId to + // get information about a RegexPatternSet (see GetRegexPatternSet), update + // a RegexPatternSet (see UpdateRegexPatternSet), insert a RegexPatternSet into + // a RegexMatchSet or delete one from a RegexMatchSet (see UpdateRegexMatchSet), + // and delete a RegexPatternSet from AWS WAF (see DeleteRegexPatternSet).
+ // + // RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets. + // + // RegexPatternSetId is a required field + RegexPatternSetId *string `min:"1" type:"string" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on RegexPatternSet before inspecting + // a request for a match. + // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system commandline + // command and using unusual formatting to disguise some or all of the command, + // use this option to perform the following transformations: + // + // * Delete the following characters: \ " ' ^ + // + // * Delete spaces before the following characters: / ( + // + // * Replace the following characters with a space: , ; + // + // * Replace multiple spaces with one space + // + // * Convert uppercase letters (A-Z) to lowercase (a-z) + // + // COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // * \f, formfeed, decimal 12 + // + // * \t, tab, decimal 9 + // + // * \n, newline, decimal 10 + // + // * \r, carriage return, decimal 13 + // + // * \v, vertical tab, decimal 11 + // + // * non-breaking space, decimal 160 + // + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. + // HTML_ENTITY_DECODE performs the following operations: + // + // * Replaces (ampersand)quot; with " + // + // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 + // + // * Replaces (ampersand)lt; with a "less than" symbol + // + // * Replaces (ampersand)gt; with > + // + // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, + // with the corresponding characters + // + // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, + // with the corresponding characters + // + // LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + // + // TextTransformation is a required field + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` } // String returns the string representation -func (s GetRateBasedRuleManagedKeysOutput) String() string { +func (s RegexMatchTuple) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRateBasedRuleManagedKeysOutput) GoString() string { +func (s RegexMatchTuple) GoString() string { return s.String() } -// SetManagedKeys sets the ManagedKeys field's value. -func (s *GetRateBasedRuleManagedKeysOutput) SetManagedKeys(v []*string) *GetRateBasedRuleManagedKeysOutput { - s.ManagedKeys = v - return s -} - -// SetNextMarker sets the NextMarker field's value. 
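// A compact sketch tying RegexPatternSet and RegexMatchTuple together, per
// the documentation above: a pattern string is inserted into a pattern set,
// and a RegexMatchTuple then points that pattern set at a request field with
// a text transformation. The two set IDs are placeholders (as returned by
// CreateRegexPatternSet/CreateRegexMatchSet), and enum values are written as
// string literals.
//
//	package main
//
//	import (
//		"log"
//
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/waf"
//	)
//
//	func main() {
//		svc := waf.New(session.Must(session.NewSession()))
//		patternSetID := aws.String("example-regexpatternset-id") // placeholder
//		matchSetID := aws.String("example-regexmatchset-id")     // placeholder
//
//		token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		// Add the regex pattern B[a@]dB[o0]t to the pattern set.
//		_, err = svc.UpdateRegexPatternSet(&waf.UpdateRegexPatternSetInput{
//			ChangeToken:       token.ChangeToken,
//			RegexPatternSetId: patternSetID,
//			Updates: []*waf.RegexPatternSetUpdate{{
//				Action:             aws.String("INSERT"), // ChangeAction enum value
//				RegexPatternString: aws.String("B[a@]dB[o0]t"),
//			}},
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//
//		token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
//		if err != nil {
//			log.Fatal(err)
//		}
//		// Inspect the lowercased User-Agent header against the pattern set.
//		_, err = svc.UpdateRegexMatchSet(&waf.UpdateRegexMatchSetInput{
//			ChangeToken:     token.ChangeToken,
//			RegexMatchSetId: matchSetID,
//			Updates: []*waf.RegexMatchSetUpdate{{
//				Action: aws.String("INSERT"),
//				RegexMatchTuple: &waf.RegexMatchTuple{
//					FieldToMatch: &waf.FieldToMatch{
//						Type: aws.String("HEADER"), // MatchFieldType enum value
//						Data: aws.String("User-Agent"),
//					},
//					RegexPatternSetId:  patternSetID,
//					TextTransformation: aws.String("LOWERCASE"),
//				},
//			}},
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//	}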
-func (s *GetRateBasedRuleManagedKeysOutput) SetNextMarker(v string) *GetRateBasedRuleManagedKeysOutput { - s.NextMarker = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRateBasedRuleResponse -type GetRateBasedRuleOutput struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegexMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegexMatchTuple"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.RegexPatternSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexPatternSetId")) + } + if s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexPatternSetId", 1)) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } + } - // Information about the RateBasedRule that you specified in the GetRateBasedRule - // request. - Rule *RateBasedRule `type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// String returns the string representation -func (s GetRateBasedRuleOutput) String() string { - return awsutil.Prettify(s) +// SetFieldToMatch sets the FieldToMatch field's value. +func (s *RegexMatchTuple) SetFieldToMatch(v *FieldToMatch) *RegexMatchTuple { + s.FieldToMatch = v + return s } -// GoString returns the string representation -func (s GetRateBasedRuleOutput) GoString() string { - return s.String() +// SetRegexPatternSetId sets the RegexPatternSetId field's value. +func (s *RegexMatchTuple) SetRegexPatternSetId(v string) *RegexMatchTuple { + s.RegexPatternSetId = &v + return s } -// SetRule sets the Rule field's value. -func (s *GetRateBasedRuleOutput) SetRule(v *RateBasedRule) *GetRateBasedRuleOutput { - s.Rule = v +// SetTextTransformation sets the TextTransformation field's value. +func (s *RegexMatchTuple) SetTextTransformation(v string) *RegexMatchTuple { + s.TextTransformation = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRuleRequest -type GetRuleInput struct { +// The RegexPatternSet specifies the regular expression (regex) pattern that +// you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure +// AWS WAF to reject those requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexPatternSet +type RegexPatternSet struct { _ struct{} `type:"structure"` - // The RuleId of the Rule that you want to get. RuleId is returned by CreateRule - // and by ListRules. + // A friendly name or description of the RegexPatternSet. You can't change Name + // after you create a RegexPatternSet. + Name *string `min:"1" type:"string"` + + // The identifier for the RegexPatternSet. You use RegexPatternSetId to get + // information about a RegexPatternSet, update a RegexPatternSet, remove a RegexPatternSet + // from a RegexMatchSet, and delete a RegexPatternSet from AWS WAF. + // + // RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets.
+ // + // RegexPatternSetId is a required field + RegexPatternSetId *string `min:"1" type:"string" required:"true"` + + // Specifies the regular expression (regex) patterns that you want AWS WAF to + // search for, such as B[a@]dB[o0]t. // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // RegexPatternStrings is a required field + RegexPatternStrings []*string `type:"list" required:"true"` } // String returns the string representation -func (s GetRuleInput) String() string { +func (s RegexPatternSet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRuleInput) GoString() string { +func (s RegexPatternSet) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetRuleInput"} - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) - } +// SetName sets the Name field's value. +func (s *RegexPatternSet) SetName(v string) *RegexPatternSet { + s.Name = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetRegexPatternSetId sets the RegexPatternSetId field's value. +func (s *RegexPatternSet) SetRegexPatternSetId(v string) *RegexPatternSet { + s.RegexPatternSetId = &v + return s } -// SetRuleId sets the RuleId field's value. -func (s *GetRuleInput) SetRuleId(v string) *GetRuleInput { - s.RuleId = &v +// SetRegexPatternStrings sets the RegexPatternStrings field's value. +func (s *RegexPatternSet) SetRegexPatternStrings(v []*string) *RegexPatternSet { + s.RegexPatternStrings = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetRuleResponse -type GetRuleOutput struct { +// Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes +// the Name and RegexPatternSetId for one RegexPatternSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexPatternSetSummary +type RegexPatternSetSummary struct { _ struct{} `type:"structure"` - // Information about the Rule that you specified in the GetRule request. For - // more information, see the following topics: + // A friendly name or description of the RegexPatternSet. You can't change Name + // after you create a RegexPatternSet. // - // * Rule: Contains MetricName, Name, an array of Predicate objects, and - // RuleId + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The RegexPatternSetId for a RegexPatternSet. You use RegexPatternSetId to + // get information about a RegexPatternSet, update a RegexPatternSet, remove + // a RegexPatternSet from a RegexMatchSet, and delete a RegexPatternSet from + // AWS WAF. // - // * Predicate: Each Predicate object contains DataId, Negated, and Type - Rule *Rule `type:"structure"` + // RegexPatternSetId is returned by CreateRegexPatternSet and by ListRegexPatternSets. 
+ // + // RegexPatternSetId is a required field + RegexPatternSetId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetRuleOutput) String() string { +func (s RegexPatternSetSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetRuleOutput) GoString() string { +func (s RegexPatternSetSummary) GoString() string { return s.String() } -// SetRule sets the Rule field's value. -func (s *GetRuleOutput) SetRule(v *Rule) *GetRuleOutput { - s.Rule = v +// SetName sets the Name field's value. +func (s *RegexPatternSetSummary) SetName(v string) *RegexPatternSetSummary { + s.Name = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequestsRequest -type GetSampledRequestsInput struct { - _ struct{} `type:"structure"` - - // The number of requests that you want AWS WAF to return from among the first - // 5,000 requests that your AWS resource received during the time range. If - // your resource received fewer requests than the value of MaxItems, GetSampledRequests - // returns information about all of them. - // - // MaxItems is a required field - MaxItems *int64 `min:"1" type:"long" required:"true"` +// SetRegexPatternSetId sets the RegexPatternSetId field's value. +func (s *RegexPatternSetSummary) SetRegexPatternSetId(v string) *RegexPatternSetSummary { + s.RegexPatternSetId = &v + return s +} - // RuleId is one of two values: - // - // * The RuleId of the Rule for which you want GetSampledRequests to return - // a sample of requests. - // - // * Default_Action, which causes GetSampledRequests to return a sample of - // the requests that didn't match any of the rules in the specified WebACL. - // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` +// In an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether +// to insert or delete a RegexPatternString and includes the settings for the +// RegexPatternString. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RegexPatternSetUpdate +type RegexPatternSetUpdate struct { + _ struct{} `type:"structure"` - // The start date and time and the end date and time of the range for which - // you want GetSampledRequests to return a sample of requests. Specify the date - // and time in the following format: "2016-09-27T14:50Z". You can specify any - // time range in the previous three hours. + // Specifies whether to insert or delete a RegexPatternString. // - // TimeWindow is a required field - TimeWindow *TimeWindow `type:"structure" required:"true"` + // Action is a required field + Action *string `type:"string" required:"true" enum:"ChangeAction"` - // The WebACLId of the WebACL for which you want GetSampledRequests to return - // a sample of requests. + // Specifies the regular expression (regex) pattern that you want AWS WAF to + // search for, such as B[a@]dB[o0]t. 
// - // WebAclId is a required field - WebAclId *string `min:"1" type:"string" required:"true"` + // RegexPatternString is a required field + RegexPatternString *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetSampledRequestsInput) String() string { +func (s RegexPatternSetUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSampledRequestsInput) GoString() string { +func (s RegexPatternSetUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetSampledRequestsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSampledRequestsInput"} - if s.MaxItems == nil { - invalidParams.Add(request.NewErrParamRequired("MaxItems")) - } - if s.MaxItems != nil && *s.MaxItems < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) - } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) - } - if s.TimeWindow == nil { - invalidParams.Add(request.NewErrParamRequired("TimeWindow")) - } - if s.WebAclId == nil { - invalidParams.Add(request.NewErrParamRequired("WebAclId")) +func (s *RegexPatternSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegexPatternSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.WebAclId != nil && len(*s.WebAclId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebAclId", 1)) + if s.RegexPatternString == nil { + invalidParams.Add(request.NewErrParamRequired("RegexPatternString")) } - if s.TimeWindow != nil { - if err := s.TimeWindow.Validate(); err != nil { - invalidParams.AddNested("TimeWindow", err.(request.ErrInvalidParams)) - } + if s.RegexPatternString != nil && len(*s.RegexPatternString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexPatternString", 1)) } if invalidParams.Len() > 0 { @@ -8405,185 +13564,179 @@ func (s *GetSampledRequestsInput) Validate() error { return nil } -// SetMaxItems sets the MaxItems field's value. -func (s *GetSampledRequestsInput) SetMaxItems(v int64) *GetSampledRequestsInput { - s.MaxItems = &v - return s -} - -// SetRuleId sets the RuleId field's value. -func (s *GetSampledRequestsInput) SetRuleId(v string) *GetSampledRequestsInput { - s.RuleId = &v - return s -} - -// SetTimeWindow sets the TimeWindow field's value. -func (s *GetSampledRequestsInput) SetTimeWindow(v *TimeWindow) *GetSampledRequestsInput { - s.TimeWindow = v +// SetAction sets the Action field's value. +func (s *RegexPatternSetUpdate) SetAction(v string) *RegexPatternSetUpdate { + s.Action = &v return s } -// SetWebAclId sets the WebAclId field's value. -func (s *GetSampledRequestsInput) SetWebAclId(v string) *GetSampledRequestsInput { - s.WebAclId = &v +// SetRegexPatternString sets the RegexPatternString field's value. +func (s *RegexPatternSetUpdate) SetRegexPatternString(v string) *RegexPatternSetUpdate { + s.RegexPatternString = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSampledRequestsResponse -type GetSampledRequestsOutput struct { +// A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects +// that identify the web requests that you want to allow, block, or count. 
For
+// example, you might create a Rule that includes the following predicates:
+//
+//    * An IPSet that causes AWS WAF to search for web requests that originate
+//    from the IP address 192.0.2.44
+//
+//    * A ByteMatchSet that causes AWS WAF to search for web requests for which
+//    the value of the User-Agent header is BadBot.
+//
+// To match the settings in this Rule, a request must originate from 192.0.2.44
+// AND include a User-Agent header for which the value is BadBot.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/Rule
+type Rule struct {
 _ struct{} `type:"structure"`

- // The total number of requests from which GetSampledRequests got a sample of
- // MaxItems requests. If PopulationSize is less than MaxItems, the sample includes
- // every request that your AWS resource received during the specified time range.
- PopulationSize *int64 `type:"long"`
+ // A friendly name or description for the metrics for this Rule. The name can
+ // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain
+ // whitespace. You can't change MetricName after you create the Rule.
+ MetricName *string `type:"string"`

- // A complex type that contains detailed information about each of the requests
- // in the sample.
- SampledRequests []*SampledHTTPRequest `type:"list"`
+ // The friendly name or description for the Rule. You can't change the name
+ // of a Rule after you create it.
+ Name *string `min:"1" type:"string"`

- // Usually, TimeWindow is the time range that you specified in the GetSampledRequests
- // request. However, if your AWS resource received more than 5,000 requests
- // during the time range that you specified in the request, GetSampledRequests
- // returns the time range for the first 5,000 requests.
- TimeWindow *TimeWindow `type:"structure"`
+ // The Predicates object contains one Predicate element for each ByteMatchSet,
+ // IPSet, or SqlInjectionMatchSet object that you want to include in a Rule.
+ //
+ // Predicates is a required field
+ Predicates []*Predicate `type:"list" required:"true"`
+
+ // A unique identifier for a Rule. You use RuleId to get more information about
+ // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into
+ // a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule
+ // from AWS WAF (see DeleteRule).
+ //
+ // RuleId is returned by CreateRule and by ListRules.
+ //
+ // RuleId is a required field
+ RuleId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
-func (s GetSampledRequestsOutput) String() string {
+func (s Rule) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s GetSampledRequestsOutput) GoString() string {
+func (s Rule) GoString() string {
 return s.String()
}

-// SetPopulationSize sets the PopulationSize field's value.
-func (s *GetSampledRequestsOutput) SetPopulationSize(v int64) *GetSampledRequestsOutput {
- s.PopulationSize = &v
+// SetMetricName sets the MetricName field's value.
+func (s *Rule) SetMetricName(v string) *Rule {
+ s.MetricName = &v
 return s
}

-// SetSampledRequests sets the SampledRequests field's value.
-func (s *GetSampledRequestsOutput) SetSampledRequests(v []*SampledHTTPRequest) *GetSampledRequestsOutput {
- s.SampledRequests = v
+// SetName sets the Name field's value.
+func (s *Rule) SetName(v string) *Rule {
+ s.Name = &v
 return s
}

-// SetTimeWindow sets the TimeWindow field's value. 
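// Editor's note: an illustrative sketch, not part of the vendored SDK. It
// builds the two predicates described in the Rule doc above; a request matches
// the Rule only if it satisfies every predicate (IPSet AND ByteMatchSet).
// The Predicate fields (Negated, DataId, Type), the "IPMatch"/"ByteMatch"
// enum values, and both IDs are assumptions made for the sketch.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	predicates := []*waf.Predicate{
		{
			DataId:  aws.String("example-ipset-id"), // hypothetical IPSetId
			Negated: aws.Bool(false),
			Type:    aws.String("IPMatch"),
		},
		{
			DataId:  aws.String("example-bytematchset-id"), // hypothetical ByteMatchSetId
			Negated: aws.Bool(false),
			Type:    aws.String("ByteMatch"),
		},
	}

	rule := &waf.Rule{}
	// Matches only requests that originate from the IPSet AND carry the
	// BadBot User-Agent tracked by the ByteMatchSet.
	rule.SetName("block-badbot").SetMetricName("BlockBadBot").SetPredicates(predicates)
	fmt.Println(rule)
}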
-func (s *GetSampledRequestsOutput) SetTimeWindow(v *TimeWindow) *GetSampledRequestsOutput { - s.TimeWindow = v +// SetPredicates sets the Predicates field's value. +func (s *Rule) SetPredicates(v []*Predicate) *Rule { + s.Predicates = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSetRequest -type GetSizeConstraintSetInput struct { - _ struct{} `type:"structure"` - - // The SizeConstraintSetId of the SizeConstraintSet that you want to get. SizeConstraintSetId - // is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. - // - // SizeConstraintSetId is a required field - SizeConstraintSetId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetSizeConstraintSetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetSizeConstraintSetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSizeConstraintSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSizeConstraintSetInput"} - if s.SizeConstraintSetId == nil { - invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) - } - if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. -func (s *GetSizeConstraintSetInput) SetSizeConstraintSetId(v string) *GetSizeConstraintSetInput { - s.SizeConstraintSetId = &v +// SetRuleId sets the RuleId field's value. +func (s *Rule) SetRuleId(v string) *Rule { + s.RuleId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSizeConstraintSetResponse -type GetSizeConstraintSetOutput struct { +// Contains the identifier and the friendly name or description of the Rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RuleSummary +type RuleSummary struct { _ struct{} `type:"structure"` - // Information about the SizeConstraintSet that you specified in the GetSizeConstraintSet - // request. For more information, see the following topics: + // A friendly name or description of the Rule. You can't change the name of + // a Rule after you create it. // - // * SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and - // Name + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a Rule. You use RuleId to get more information about + // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into + // a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule + // from AWS WAF (see DeleteRule). // - // * SizeConstraints: Contains an array of SizeConstraint objects. Each SizeConstraint - // object contains FieldToMatch, TextTransformation, ComparisonOperator, - // and Size + // RuleId is returned by CreateRule and by ListRules. 
// - // * FieldToMatch: Contains Data and Type - SizeConstraintSet *SizeConstraintSet `type:"structure"` + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetSizeConstraintSetOutput) String() string { +func (s RuleSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSizeConstraintSetOutput) GoString() string { +func (s RuleSummary) GoString() string { return s.String() } -// SetSizeConstraintSet sets the SizeConstraintSet field's value. -func (s *GetSizeConstraintSetOutput) SetSizeConstraintSet(v *SizeConstraintSet) *GetSizeConstraintSetOutput { - s.SizeConstraintSet = v +// SetName sets the Name field's value. +func (s *RuleSummary) SetName(v string) *RuleSummary { + s.Name = &v return s } -// A request to get a SqlInjectionMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSetRequest -type GetSqlInjectionMatchSetInput struct { +// SetRuleId sets the RuleId field's value. +func (s *RuleSummary) SetRuleId(v string) *RuleSummary { + s.RuleId = &v + return s +} + +// Specifies a Predicate (such as an IPSet) and indicates whether you want to +// add it to a Rule or delete it from a Rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RuleUpdate +type RuleUpdate struct { _ struct{} `type:"structure"` - // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to get. - // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + // Specify INSERT to add a Predicate to a Rule. Use DELETE to remove a Predicate + // from a Rule. // - // SqlInjectionMatchSetId is a required field - SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + // Action is a required field + Action *string `type:"string" required:"true" enum:"ChangeAction"` + + // The ID of the Predicate (such as an IPSet) that you want to add to a Rule. + // + // Predicate is a required field + Predicate *Predicate `type:"structure" required:"true"` } // String returns the string representation -func (s GetSqlInjectionMatchSetInput) String() string { +func (s RuleUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetSqlInjectionMatchSetInput) GoString() string { +func (s RuleUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetSqlInjectionMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSqlInjectionMatchSetInput"} - if s.SqlInjectionMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) +func (s *RuleUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RuleUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + if s.Predicate == nil { + invalidParams.Add(request.NewErrParamRequired("Predicate")) + } + if s.Predicate != nil { + if err := s.Predicate.Validate(); err != nil { + invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8592,154 +13745,237 @@ func (s *GetSqlInjectionMatchSetInput) Validate() error { return nil } -// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. -func (s *GetSqlInjectionMatchSetInput) SetSqlInjectionMatchSetId(v string) *GetSqlInjectionMatchSetInput { - s.SqlInjectionMatchSetId = &v +// SetAction sets the Action field's value. +func (s *RuleUpdate) SetAction(v string) *RuleUpdate { + s.Action = &v return s } -// The response to a GetSqlInjectionMatchSet request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetSqlInjectionMatchSetResponse -type GetSqlInjectionMatchSetOutput struct { +// SetPredicate sets the Predicate field's value. +func (s *RuleUpdate) SetPredicate(v *Predicate) *RuleUpdate { + s.Predicate = v + return s +} + +// The response from a GetSampledRequests request includes a SampledHTTPRequests +// complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests +// contains one SampledHTTPRequest object for each web request that is returned +// by GetSampledRequests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SampledHTTPRequest +type SampledHTTPRequest struct { _ struct{} `type:"structure"` - // Information about the SqlInjectionMatchSet that you specified in the GetSqlInjectionMatchSet - // request. For more information, see the following topics: - // - // * SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and an - // array of SqlInjectionMatchTuple objects + // The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT. + Action *string `type:"string"` + + // A complex type that contains detailed information about the request. // - // * SqlInjectionMatchTuple: Each SqlInjectionMatchTuple object contains - // FieldToMatch and TextTransformation + // Request is a required field + Request *HTTPRequest `type:"structure" required:"true"` + + // The time at which AWS WAF received the request from your AWS resource, in + // Unix time format (in seconds). + Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // A value that indicates how one result in the response relates proportionally + // to other results in the response. A result that has a weight of 2 represents + // roughly twice as many CloudFront web requests as a result that has a weight + // of 1. 
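// Editor's note: a rough illustration (not SDK code) of the Weight semantics
// documented above: weights are relative, so each sample's approximate share
// of PopulationSize is its weight over the total weight. All numbers are made up.
package main

import "fmt"

func main() {
	weights := []int64{2, 1, 1}   // Weight of each SampledHTTPRequest
	populationSize := int64(4000) // PopulationSize from GetSampledRequests

	var total int64
	for _, w := range weights {
		total += w
	}
	for i, w := range weights {
		// A weight of 2 stands for roughly twice as many requests as a weight of 1.
		fmt.Printf("sample %d represents ~%d requests\n", i, populationSize*w/total)
	}
}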
//
- // * FieldToMatch: Contains Data and Type
- SqlInjectionMatchSet *SqlInjectionMatchSet `type:"structure"`
+ // Weight is a required field
+ Weight *int64 `type:"long" required:"true"`
}

// String returns the string representation
-func (s GetSqlInjectionMatchSetOutput) String() string {
+func (s SampledHTTPRequest) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s GetSqlInjectionMatchSetOutput) GoString() string {
+func (s SampledHTTPRequest) GoString() string {
 return s.String()
}

-// SetSqlInjectionMatchSet sets the SqlInjectionMatchSet field's value.
-func (s *GetSqlInjectionMatchSetOutput) SetSqlInjectionMatchSet(v *SqlInjectionMatchSet) *GetSqlInjectionMatchSetOutput {
- s.SqlInjectionMatchSet = v
+// SetAction sets the Action field's value.
+func (s *SampledHTTPRequest) SetAction(v string) *SampledHTTPRequest {
+ s.Action = &v
 return s
}

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACLRequest
-type GetWebACLInput struct {
+// SetRequest sets the Request field's value.
+func (s *SampledHTTPRequest) SetRequest(v *HTTPRequest) *SampledHTTPRequest {
+ s.Request = v
+ return s
+}
+
+// SetTimestamp sets the Timestamp field's value.
+func (s *SampledHTTPRequest) SetTimestamp(v time.Time) *SampledHTTPRequest {
+ s.Timestamp = &v
+ return s
+}
+
+// SetWeight sets the Weight field's value.
+func (s *SampledHTTPRequest) SetWeight(v int64) *SampledHTTPRequest {
+ s.Weight = &v
+ return s
+}
+
+// Specifies a constraint on the size of a part of the web request. AWS WAF
+// uses the Size, ComparisonOperator, and FieldToMatch to build an expression
+// in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If
+// that expression is true, the SizeConstraint is considered to match.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraint
+type SizeConstraint struct {
 _ struct{} `type:"structure"`

- // The WebACLId of the WebACL that you want to get. WebACLId is returned by
- // CreateWebACL and by ListWebACLs.
+ // The type of comparison you want AWS WAF to perform. AWS WAF uses this in
+ // combination with the provided Size and FieldToMatch to build an expression
+ // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". If
+ // that expression is true, the SizeConstraint is considered to match.
+ //
+ // EQ: Used to test if the Size is equal to the size of the FieldToMatch
+ //
+ // NE: Used to test if the Size is not equal to the size of the FieldToMatch
+ //
+ // LE: Used to test if the Size is less than or equal to the size of the FieldToMatch
+ //
+ // LT: Used to test if the Size is strictly less than the size of the FieldToMatch
+ //
+ // GE: Used to test if the Size is greater than or equal to the size of the
+ // FieldToMatch
+ //
+ // GT: Used to test if the Size is strictly greater than the size of the FieldToMatch
+ //
+ // ComparisonOperator is a required field
+ ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"`
+
+ // Specifies where in a web request to look for the size constraint.
+ //
+ // FieldToMatch is a required field
+ FieldToMatch *FieldToMatch `type:"structure" required:"true"`
+
+ // The size in bytes that you want AWS WAF to compare against the size of the
+ // specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator
+ // and FieldToMatch to build an expression in the form of "Size ComparisonOperator
+ // size in bytes of FieldToMatch". 
If that expression is true, the SizeConstraint + // is considered to match. + // + // Valid values for size are 0 - 21474836480 bytes (0 - 20 GB). + // + // If you specify URI for the value of Type, the / in the URI counts as one + // character. For example, the URI /logo.jpg is nine characters long. + // + // Size is a required field + Size *int64 `type:"long" required:"true"` + + // Text transformations eliminate some of the unusual formatting that attackers + // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, + // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // for a match. + // + // Note that if you choose BODY for the value of Type, you must choose NONE + // for TextTransformation because CloudFront forwards only the first 8192 bytes + // for inspection. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + // + // CMD_LINE + // + // When you're concerned that attackers are injecting an operating system command + // line command and using unusual formatting to disguise some or all of the + // command, use this option to perform the following transformations: + // + // * Delete the following characters: \ " ' ^ + // + // * Delete spaces before the following characters: / ( + // + // * Replace the following characters with a space: , ; + // + // * Replace multiple spaces with one space + // + // * Convert uppercase letters (A-Z) to lowercase (a-z) + // + // COMPRESS_WHITE_SPACE + // + // Use this option to replace the following characters with a space character + // (decimal 32): + // + // * \f, formfeed, decimal 12 + // + // * \t, tab, decimal 9 + // + // * \n, newline, decimal 10 + // + // * \r, carriage return, decimal 13 + // + // * \v, vertical tab, decimal 11 + // + // * non-breaking space, decimal 160 + // + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // + // HTML_ENTITY_DECODE + // + // Use this option to replace HTML-encoded characters with unencoded characters. + // HTML_ENTITY_DECODE performs the following operations: + // + // * Replaces (ampersand)quot; with " + // + // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 + // + // * Replaces (ampersand)lt; with a "less than" symbol + // + // * Replaces (ampersand)gt; with > // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s GetWebACLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetWebACLInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetWebACLInput"} - if s.WebACLId == nil { - invalidParams.Add(request.NewErrParamRequired("WebACLId")) - } - if s.WebACLId != nil && len(*s.WebACLId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWebACLId sets the WebACLId field's value. -func (s *GetWebACLInput) SetWebACLId(v string) *GetWebACLInput { - s.WebACLId = &v - return s -} - -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetWebACLResponse -type GetWebACLOutput struct { - _ struct{} `type:"structure"` - - // Information about the WebACL that you specified in the GetWebACL request. 
- // For more information, see the following topics: + // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, + // with the corresponding characters // - // * WebACL: Contains DefaultAction, MetricName, Name, an array of Rule objects, - // and WebACLId + // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, + // with the corresponding characters // - // * DefaultAction (Data type is WafAction): Contains Type + // LOWERCASE // - // * Rules: Contains an array of ActivatedRule objects, which contain Action, - // Priority, and RuleId + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). // - // * Action: Contains Type - WebACL *WebACL `type:"structure"` -} - -// String returns the string representation -func (s GetWebACLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetWebACLOutput) GoString() string { - return s.String() -} - -// SetWebACL sets the WebACL field's value. -func (s *GetWebACLOutput) SetWebACL(v *WebACL) *GetWebACLOutput { - s.WebACL = v - return s -} - -// A request to get an XssMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSetRequest -type GetXssMatchSetInput struct { - _ struct{} `type:"structure"` - - // The XssMatchSetId of the XssMatchSet that you want to get. XssMatchSetId - // is returned by CreateXssMatchSet and by ListXssMatchSets. + // URL_DECODE // - // XssMatchSetId is a required field - XssMatchSetId *string `min:"1" type:"string" required:"true"` + // Use this option to decode a URL-encoded value. + // + // TextTransformation is a required field + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` } // String returns the string representation -func (s GetXssMatchSetInput) String() string { +func (s SizeConstraint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetXssMatchSetInput) GoString() string { +func (s SizeConstraint) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetXssMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetXssMatchSetInput"} - if s.XssMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) +func (s *SizeConstraint) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SizeConstraint"} + if s.ComparisonOperator == nil { + invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) } - if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8748,285 +13984,175 @@ func (s *GetXssMatchSetInput) Validate() error { return nil } -// SetXssMatchSetId sets the XssMatchSetId field's value. 
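// Editor's note: an illustrative sketch (not part of the vendored SDK) of the
// SizeConstraint defined above. Note the operand order of the expression
// "Size ComparisonOperator size in bytes of FieldToMatch": with "LT" and
// Size=100, the constraint matches when 100 is less than the field's size,
// i.e. URIs longer than 100 bytes. "LT", "URI", and "NONE" are enum values
// taken from the surrounding docs.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	constraint := &waf.SizeConstraint{}
	constraint.SetComparisonOperator("LT").
		SetFieldToMatch(&waf.FieldToMatch{Type: aws.String("URI")}).
		SetSize(100). // bytes; the docs allow 0 - 21474836480
		SetTextTransformation("NONE")

	if err := constraint.Validate(); err != nil {
		fmt.Println("invalid constraint:", err)
		return
	}
	fmt.Println(constraint)
}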
-func (s *GetXssMatchSetInput) SetXssMatchSetId(v string) *GetXssMatchSetInput { - s.XssMatchSetId = &v +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *SizeConstraint) SetComparisonOperator(v string) *SizeConstraint { + s.ComparisonOperator = &v return s } -// The response to a GetXssMatchSet request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/GetXssMatchSetResponse -type GetXssMatchSetOutput struct { - _ struct{} `type:"structure"` - - // Information about the XssMatchSet that you specified in the GetXssMatchSet - // request. For more information, see the following topics: - // - // * XssMatchSet: Contains Name, XssMatchSetId, and an array of XssMatchTuple - // objects - // - // * XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and TextTransformation - // - // * FieldToMatch: Contains Data and Type - XssMatchSet *XssMatchSet `type:"structure"` -} - -// String returns the string representation -func (s GetXssMatchSetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetXssMatchSetOutput) GoString() string { - return s.String() -} - -// SetXssMatchSet sets the XssMatchSet field's value. -func (s *GetXssMatchSetOutput) SetXssMatchSet(v *XssMatchSet) *GetXssMatchSetOutput { - s.XssMatchSet = v +// SetFieldToMatch sets the FieldToMatch field's value. +func (s *SizeConstraint) SetFieldToMatch(v *FieldToMatch) *SizeConstraint { + s.FieldToMatch = v return s } -// The response from a GetSampledRequests request includes an HTTPHeader complex -// type that appears as Headers in the response syntax. HTTPHeader contains -// the names and values of all of the headers that appear in one of the web -// requests that were returned by GetSampledRequests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/HTTPHeader -type HTTPHeader struct { - _ struct{} `type:"structure"` - - // The name of one of the headers in the sampled web request. - Name *string `type:"string"` - - // The value of one of the headers in the sampled web request. - Value *string `type:"string"` -} - -// String returns the string representation -func (s HTTPHeader) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HTTPHeader) GoString() string { - return s.String() -} - -// SetName sets the Name field's value. -func (s *HTTPHeader) SetName(v string) *HTTPHeader { - s.Name = &v +// SetSize sets the Size field's value. +func (s *SizeConstraint) SetSize(v int64) *SizeConstraint { + s.Size = &v return s } -// SetValue sets the Value field's value. -func (s *HTTPHeader) SetValue(v string) *HTTPHeader { - s.Value = &v +// SetTextTransformation sets the TextTransformation field's value. +func (s *SizeConstraint) SetTextTransformation(v string) *SizeConstraint { + s.TextTransformation = &v return s } -// The response from a GetSampledRequests request includes an HTTPRequest complex -// type that appears as Request in the response syntax. HTTPRequest contains -// information about one of the web requests that were returned by GetSampledRequests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/HTTPRequest -type HTTPRequest struct { +// A complex type that contains SizeConstraint objects, which specify the parts +// of web requests that you want AWS WAF to inspect the size of. 
If a SizeConstraintSet +// contains more than one SizeConstraint object, a request only needs to match +// one constraint to be considered a match. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraintSet +type SizeConstraintSet struct { _ struct{} `type:"structure"` - // The IP address that the request originated from. If the WebACL is associated - // with a CloudFront distribution, this is the value of one of the following - // fields in CloudFront access logs: + // The name, if any, of the SizeConstraintSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId + // to get information about a SizeConstraintSet (see GetSizeConstraintSet), + // update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet + // from AWS WAF (see DeleteSizeConstraintSet). // - // * c-ip, if the viewer did not use an HTTP proxy or a load balancer to - // send the request + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. // - // * x-forwarded-for, if the viewer did use an HTTP proxy or a load balancer - // to send the request - ClientIP *string `type:"string"` - - // The two-letter country code for the country that the request originated from. - // For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2 - // (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). - Country *string `type:"string"` - - // The HTTP version specified in the sampled web request, for example, HTTP/1.1. - HTTPVersion *string `type:"string"` - - // A complex type that contains two values for each header in the sampled web - // request: the name of the header and the value of the header. - Headers []*HTTPHeader `type:"list"` - - // The HTTP method specified in the sampled web request. CloudFront supports - // the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT. - Method *string `type:"string"` - - // The part of a web request that identifies the resource, for example, /images/daily-ad.jpg. - URI *string `type:"string"` -} - -// String returns the string representation -func (s HTTPRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HTTPRequest) GoString() string { - return s.String() -} + // SizeConstraintSetId is a required field + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` -// SetClientIP sets the ClientIP field's value. -func (s *HTTPRequest) SetClientIP(v string) *HTTPRequest { - s.ClientIP = &v - return s + // Specifies the parts of web requests that you want to inspect the size of. + // + // SizeConstraints is a required field + SizeConstraints []*SizeConstraint `type:"list" required:"true"` } -// SetCountry sets the Country field's value. -func (s *HTTPRequest) SetCountry(v string) *HTTPRequest { - s.Country = &v - return s +// String returns the string representation +func (s SizeConstraintSet) String() string { + return awsutil.Prettify(s) } -// SetHTTPVersion sets the HTTPVersion field's value. -func (s *HTTPRequest) SetHTTPVersion(v string) *HTTPRequest { - s.HTTPVersion = &v - return s +// GoString returns the string representation +func (s SizeConstraintSet) GoString() string { + return s.String() } -// SetHeaders sets the Headers field's value. 
-func (s *HTTPRequest) SetHeaders(v []*HTTPHeader) *HTTPRequest { - s.Headers = v +// SetName sets the Name field's value. +func (s *SizeConstraintSet) SetName(v string) *SizeConstraintSet { + s.Name = &v return s } -// SetMethod sets the Method field's value. -func (s *HTTPRequest) SetMethod(v string) *HTTPRequest { - s.Method = &v +// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. +func (s *SizeConstraintSet) SetSizeConstraintSetId(v string) *SizeConstraintSet { + s.SizeConstraintSetId = &v return s } -// SetURI sets the URI field's value. -func (s *HTTPRequest) SetURI(v string) *HTTPRequest { - s.URI = &v +// SetSizeConstraints sets the SizeConstraints field's value. +func (s *SizeConstraintSet) SetSizeConstraints(v []*SizeConstraint) *SizeConstraintSet { + s.SizeConstraints = v return s } -// Contains one or more IP addresses or blocks of IP addresses specified in -// Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports /8, /16, -// /24, and /32 IP address ranges for IPv4, and /24, /32, /48, /56, /64 and -// /128 for IPv6. -// -// To specify an individual IP address, you specify the four-part IP address -// followed by a /32, for example, 192.0.2.0/31. To block a range of IP addresses, -// you can specify a /128, /64, /56, /48, /32, /24, /16, or /8 CIDR. For more -// information about CIDR notation, see the Wikipedia entry Classless Inter-Domain -// Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSet -type IPSet struct { +// The Id and Name of a SizeConstraintSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraintSetSummary +type SizeConstraintSetSummary struct { _ struct{} `type:"structure"` - // The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) - // that web requests originate from. If the WebACL is associated with a CloudFront - // distribution and the viewer did not use an HTTP proxy or a load balancer - // to send the request, this is the value of the c-ip field in the CloudFront - // access logs. + // The name of the SizeConstraintSet, if any. // - // IPSetDescriptors is a required field - IPSetDescriptors []*IPSetDescriptor `type:"list" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The IPSetId for an IPSet. You use IPSetId to get information about an IPSet - // (see GetIPSet), update an IPSet (see UpdateIPSet), insert an IPSet into a - // Rule or delete one from a Rule (see UpdateRule), and delete an IPSet from - // AWS WAF (see DeleteIPSet). + // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId + // to get information about a SizeConstraintSet (see GetSizeConstraintSet), + // update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet + // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet + // from AWS WAF (see DeleteSizeConstraintSet). // - // IPSetId is returned by CreateIPSet and by ListIPSets. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. // - // IPSetId is a required field - IPSetId *string `min:"1" type:"string" required:"true"` - - // A friendly name or description of the IPSet. You can't change the name of - // an IPSet after you create it. 
- Name *string `min:"1" type:"string"`
+ // SizeConstraintSetId is a required field
+ SizeConstraintSetId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
-func (s IPSet) String() string {
+func (s SizeConstraintSetSummary) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s IPSet) GoString() string {
+func (s SizeConstraintSetSummary) GoString() string {
 return s.String()
}

-// SetIPSetDescriptors sets the IPSetDescriptors field's value.
-func (s *IPSet) SetIPSetDescriptors(v []*IPSetDescriptor) *IPSet {
- s.IPSetDescriptors = v
- return s
-}
-
-// SetIPSetId sets the IPSetId field's value.
-func (s *IPSet) SetIPSetId(v string) *IPSet {
- s.IPSetId = &v
+// SetName sets the Name field's value.
+func (s *SizeConstraintSetSummary) SetName(v string) *SizeConstraintSetSummary {
+ s.Name = &v
 return s
}

-// SetName sets the Name field's value.
-func (s *IPSet) SetName(v string) *IPSet {
- s.Name = &v
+// SetSizeConstraintSetId sets the SizeConstraintSetId field's value.
+func (s *SizeConstraintSetSummary) SetSizeConstraintSetId(v string) *SizeConstraintSetSummary {
+ s.SizeConstraintSetId = &v
 return s
}

-// Specifies the IP address type (IPV4 or IPV6) and the IP address range (in
-// CIDR format) that web requests originate from.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSetDescriptor
-type IPSetDescriptor struct {
+// Specifies the part of a web request that you want to inspect the size of
+// and indicates whether you want to add the specification to a SizeConstraintSet
+// or delete it from a SizeConstraintSet.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraintSetUpdate
+type SizeConstraintSetUpdate struct {
 _ struct{} `type:"structure"`

- // Specify IPV4 or IPV6.
+ // Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet. Use
+ // DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet.
 //
- // Type is a required field
- Type *string `type:"string" required:"true" enum:"IPSetDescriptorType"`
+ // Action is a required field
+ Action *string `type:"string" required:"true" enum:"ChangeAction"`

- // Specify an IPv4 address by using CIDR notation. For example:
- //
- //    * To configure AWS WAF to allow, block, or count requests that originated
- //    from the IP address 192.0.2.44, specify 192.0.2.44/32.
- //
- //    * To configure AWS WAF to allow, block, or count requests that originated
- //    from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24.
- //
- // For more information about CIDR notation, see the Wikipedia entry Classless
- // Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing).
- //
- // Specify an IPv6 address by using CIDR notation. For example:
- //
- //    * To configure AWS WAF to allow, block, or count requests that originated
- //    from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128.
- //
- //    * To configure AWS WAF to allow, block, or count requests that originated
- //    from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff,
- //    specify 1111:0000:0000:0000:0000:0000:0000:0000/64.
+ // Specifies a constraint on the size of a part of the web request. AWS WAF
+ // uses the Size, ComparisonOperator, and FieldToMatch to build an expression
+ // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". 
If + // that expression is true, the SizeConstraint is considered to match. // - // Value is a required field - Value *string `type:"string" required:"true"` + // SizeConstraint is a required field + SizeConstraint *SizeConstraint `type:"structure" required:"true"` } // String returns the string representation -func (s IPSetDescriptor) String() string { +func (s SizeConstraintSetUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IPSetDescriptor) GoString() string { +func (s SizeConstraintSetUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *IPSetDescriptor) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IPSetDescriptor"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) +func (s *SizeConstraintSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SizeConstraintSetUpdate"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) + if s.SizeConstraint == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraint")) + } + if s.SizeConstraint != nil { + if err := s.SizeConstraint.Validate(); err != nil { + invalidParams.AddNested("SizeConstraint", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -9035,97 +14161,164 @@ func (s *IPSetDescriptor) Validate() error { return nil } -// SetType sets the Type field's value. -func (s *IPSetDescriptor) SetType(v string) *IPSetDescriptor { - s.Type = &v +// SetAction sets the Action field's value. +func (s *SizeConstraintSetUpdate) SetAction(v string) *SizeConstraintSetUpdate { + s.Action = &v return s } -// SetValue sets the Value field's value. -func (s *IPSetDescriptor) SetValue(v string) *IPSetDescriptor { - s.Value = &v +// SetSizeConstraint sets the SizeConstraint field's value. +func (s *SizeConstraintSetUpdate) SetSizeConstraint(v *SizeConstraint) *SizeConstraintSetUpdate { + s.SizeConstraint = v return s } -// Contains the identifier and the name of the IPSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSetSummary -type IPSetSummary struct { +// A complex type that contains SqlInjectionMatchTuple objects, which specify +// the parts of web requests that you want AWS WAF to inspect for snippets of +// malicious SQL code and, if you want AWS WAF to inspect a header, the name +// of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple +// object, a request needs to include snippets of SQL code in only one of the +// specified parts of the request to be considered a match. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchSet +type SqlInjectionMatchSet struct { _ struct{} `type:"structure"` - // The IPSetId for an IPSet. You can use IPSetId in a GetIPSet request to get - // detailed information about an IPSet. + // The name, if any, of the SqlInjectionMatchSet. + Name *string `min:"1" type:"string"` + + // A unique identifier for a SqlInjectionMatchSet. 
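// Editor's note: an illustrative sketch (not part of the vendored SDK) pairing
// an Action with a SizeConstraint, as an UpdateSizeConstraintSet call expects.
// "INSERT"/"DELETE" come from the Action docs above; the constraint mirrors
// the earlier sketch.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	update := &waf.SizeConstraintSetUpdate{}
	update.SetAction("INSERT") // use "DELETE" to remove an existing constraint
	update.SetSizeConstraint(&waf.SizeConstraint{
		ComparisonOperator: aws.String("LT"),
		FieldToMatch:       &waf.FieldToMatch{Type: aws.String("URI")},
		Size:               aws.Int64(100),
		TextTransformation: aws.String("NONE"),
	})

	// Validate also descends into the nested SizeConstraint, as shown above.
	if err := update.Validate(); err != nil {
		fmt.Println("invalid update:", err)
		return
	}
	fmt.Println(update)
}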
You use SqlInjectionMatchSetId + // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), + // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a + // SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet). // - // IPSetId is a required field - IPSetId *string `min:"1" type:"string" required:"true"` + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + // + // SqlInjectionMatchSetId is a required field + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` - // A friendly name or description of the IPSet. You can't change the name of - // an IPSet after you create it. + // Specifies the parts of web requests that you want to inspect for snippets + // of malicious SQL code. + // + // SqlInjectionMatchTuples is a required field + SqlInjectionMatchTuples []*SqlInjectionMatchTuple `type:"list" required:"true"` +} + +// String returns the string representation +func (s SqlInjectionMatchSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SqlInjectionMatchSet) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *SqlInjectionMatchSet) SetName(v string) *SqlInjectionMatchSet { + s.Name = &v + return s +} + +// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. +func (s *SqlInjectionMatchSet) SetSqlInjectionMatchSetId(v string) *SqlInjectionMatchSet { + s.SqlInjectionMatchSetId = &v + return s +} + +// SetSqlInjectionMatchTuples sets the SqlInjectionMatchTuples field's value. +func (s *SqlInjectionMatchSet) SetSqlInjectionMatchTuples(v []*SqlInjectionMatchTuple) *SqlInjectionMatchSet { + s.SqlInjectionMatchTuples = v + return s +} + +// The Id and Name of a SqlInjectionMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchSetSummary +type SqlInjectionMatchSetSummary struct { + _ struct{} `type:"structure"` + + // The name of the SqlInjectionMatchSet, if any, specified by Id. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` + + // A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId + // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), + // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a + // SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet). + // + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + // + // SqlInjectionMatchSetId is a required field + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s IPSetSummary) String() string { +func (s SqlInjectionMatchSetSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IPSetSummary) GoString() string { +func (s SqlInjectionMatchSetSummary) GoString() string { return s.String() } -// SetIPSetId sets the IPSetId field's value. -func (s *IPSetSummary) SetIPSetId(v string) *IPSetSummary { - s.IPSetId = &v +// SetName sets the Name field's value. 
+func (s *SqlInjectionMatchSetSummary) SetName(v string) *SqlInjectionMatchSetSummary { + s.Name = &v return s } -// SetName sets the Name field's value. -func (s *IPSetSummary) SetName(v string) *IPSetSummary { - s.Name = &v +// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. +func (s *SqlInjectionMatchSetSummary) SetSqlInjectionMatchSetId(v string) *SqlInjectionMatchSetSummary { + s.SqlInjectionMatchSetId = &v return s } -// Specifies the type of update to perform to an IPSet with UpdateIPSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/IPSetUpdate -type IPSetUpdate struct { +// Specifies the part of a web request that you want to inspect for snippets +// of malicious SQL code and indicates whether you want to add the specification +// to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchSetUpdate +type SqlInjectionMatchSetUpdate struct { _ struct{} `type:"structure"` - // Specifies whether to insert or delete an IP address with UpdateIPSet. + // Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet. + // Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet. // // Action is a required field Action *string `type:"string" required:"true" enum:"ChangeAction"` - // The IP address type (IPV4 or IPV6) and the IP address range (in CIDR notation) - // that web requests originate from. + // Specifies the part of a web request that you want AWS WAF to inspect for + // snippets of malicious SQL code and, if you want AWS WAF to inspect a header, + // the name of the header. // - // IPSetDescriptor is a required field - IPSetDescriptor *IPSetDescriptor `type:"structure" required:"true"` + // SqlInjectionMatchTuple is a required field + SqlInjectionMatchTuple *SqlInjectionMatchTuple `type:"structure" required:"true"` } // String returns the string representation -func (s IPSetUpdate) String() string { +func (s SqlInjectionMatchSetUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IPSetUpdate) GoString() string { +func (s SqlInjectionMatchSetUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *IPSetUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IPSetUpdate"} +func (s *SqlInjectionMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqlInjectionMatchSetUpdate"} if s.Action == nil { invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.IPSetDescriptor == nil { - invalidParams.Add(request.NewErrParamRequired("IPSetDescriptor")) + if s.SqlInjectionMatchTuple == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchTuple")) } - if s.IPSetDescriptor != nil { - if err := s.IPSetDescriptor.Validate(); err != nil { - invalidParams.AddNested("IPSetDescriptor", err.(request.ErrInvalidParams)) + if s.SqlInjectionMatchTuple != nil { + if err := s.SqlInjectionMatchTuple.Validate(); err != nil { + invalidParams.AddNested("SqlInjectionMatchTuple", err.(request.ErrInvalidParams)) } } @@ -9136,50 +14329,127 @@ func (s *IPSetUpdate) Validate() error { } // SetAction sets the Action field's value. 
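// Editor's note: an illustrative sketch (not part of the vendored SDK): an
// update that asks AWS WAF to start inspecting the URL-decoded query string
// for SQL injection snippets. "QUERY_STRING" is an assumed FieldToMatch type
// enum value; "INSERT" and "URL_DECODE" come from the docs above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	update := &waf.SqlInjectionMatchSetUpdate{}
	update.SetAction("INSERT")
	update.SetSqlInjectionMatchTuple(&waf.SqlInjectionMatchTuple{
		FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
		TextTransformation: aws.String("URL_DECODE"),
	})

	if err := update.Validate(); err != nil {
		fmt.Println("invalid update:", err)
		return
	}
	fmt.Println(update)
}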
-func (s *IPSetUpdate) SetAction(v string) *IPSetUpdate {
+func (s *SqlInjectionMatchSetUpdate) SetAction(v string) *SqlInjectionMatchSetUpdate {
 s.Action = &v
 return s
}

-// SetIPSetDescriptor sets the IPSetDescriptor field's value.
-func (s *IPSetUpdate) SetIPSetDescriptor(v *IPSetDescriptor) *IPSetUpdate {
- s.IPSetDescriptor = v
+// SetSqlInjectionMatchTuple sets the SqlInjectionMatchTuple field's value.
+func (s *SqlInjectionMatchSetUpdate) SetSqlInjectionMatchTuple(v *SqlInjectionMatchTuple) *SqlInjectionMatchSetUpdate {
+ s.SqlInjectionMatchTuple = v
 return s
}

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSetsRequest
-type ListByteMatchSetsInput struct {
+// Specifies the part of a web request that you want AWS WAF to inspect for
+// snippets of malicious SQL code and, if you want AWS WAF to inspect a header,
+// the name of the header.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchTuple
+type SqlInjectionMatchTuple struct {
 _ struct{} `type:"structure"`

- // Specifies the number of ByteMatchSet objects that you want AWS WAF to return
- // for this request. If you have more ByteMatchSets objects than the number
- // you specify for Limit, the response includes a NextMarker value that you
- // can use to get another batch of ByteMatchSet objects.
- Limit *int64 `type:"integer"`
+ // Specifies where in a web request to look for snippets of malicious SQL code.
+ //
+ // FieldToMatch is a required field
+ FieldToMatch *FieldToMatch `type:"structure" required:"true"`

- // If you specify a value for Limit and you have more ByteMatchSets than the
- // value of Limit, AWS WAF returns a NextMarker value in the response that allows
- // you to list another group of ByteMatchSets. For the second and subsequent
- // ListByteMatchSets requests, specify the value of NextMarker from the previous
- // response to get information about another batch of ByteMatchSets.
- NextMarker *string `min:"1" type:"string"`
+ // Text transformations eliminate some of the unusual formatting that attackers
+ // use in web requests in an effort to bypass AWS WAF. If you specify a transformation,
+ // AWS WAF performs the transformation on FieldToMatch before inspecting a request
+ // for a match.
+ //
+ // CMD_LINE
+ //
+ // When you're concerned that attackers are injecting an operating system command
+ // line command and using unusual formatting to disguise some or all of the command,
+ // use this option to perform the following transformations:
+ //
+ //    * Delete the following characters: \ " ' ^
+ //
+ //    * Delete spaces before the following characters: / (
+ //
+ //    * Replace the following characters with a space: , ;
+ //
+ //    * Replace multiple spaces with one space
+ //
+ //    * Convert uppercase letters (A-Z) to lowercase (a-z)
+ //
+ // COMPRESS_WHITE_SPACE
+ //
+ // Use this option to replace the following characters with a space character
+ // (decimal 32):
+ //
+ //    * \f, formfeed, decimal 12
+ //
+ //    * \t, tab, decimal 9
+ //
+ //    * \n, newline, decimal 10
+ //
+ //    * \r, carriage return, decimal 13
+ //
+ //    * \v, vertical tab, decimal 11
+ //
+ //    * non-breaking space, decimal 160
+ //
+ // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
+ //
+ // HTML_ENTITY_DECODE
+ //
+ // Use this option to replace HTML-encoded characters with unencoded characters. 
+ // HTML_ENTITY_DECODE performs the following operations: + // + // * Replaces (ampersand)quot; with " + // + // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 + // + // * Replaces (ampersand)lt; with a "less than" symbol + // + // * Replaces (ampersand)gt; with > + // + // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, + // with the corresponding characters + // + // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, + // with the corresponding characters + // + // LOWERCASE + // + // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // + // URL_DECODE + // + // Use this option to decode a URL-encoded value. + // + // NONE + // + // Specify NONE if you don't want to perform any text transformations. + // + // TextTransformation is a required field + TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` } // String returns the string representation -func (s ListByteMatchSetsInput) String() string { +func (s SqlInjectionMatchTuple) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListByteMatchSetsInput) GoString() string { +func (s SqlInjectionMatchTuple) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListByteMatchSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListByteMatchSetsInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *SqlInjectionMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqlInjectionMatchTuple"} + if s.FieldToMatch == nil { + invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) + } + if s.TextTransformation == nil { + invalidParams.Add(request.NewErrParamRequired("TextTransformation")) + } + if s.FieldToMatch != nil { + if err := s.FieldToMatch.Validate(); err != nil { + invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -9188,88 +14458,156 @@ func (s *ListByteMatchSetsInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListByteMatchSetsInput) SetLimit(v int64) *ListByteMatchSetsInput { - s.Limit = &v +// SetFieldToMatch sets the FieldToMatch field's value. +func (s *SqlInjectionMatchTuple) SetFieldToMatch(v *FieldToMatch) *SqlInjectionMatchTuple { + s.FieldToMatch = v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListByteMatchSetsInput) SetNextMarker(v string) *ListByteMatchSetsInput { - s.NextMarker = &v +// SetTextTransformation sets the TextTransformation field's value. +func (s *SqlInjectionMatchTuple) SetTextTransformation(v string) *SqlInjectionMatchTuple { + s.TextTransformation = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListByteMatchSetsResponse -type ListByteMatchSetsOutput struct { +// In a GetSampledRequests request, the StartTime and EndTime objects specify +// the time range for which you want AWS WAF to return a sample of web requests. +// +// In a GetSampledRequests response, the StartTime and EndTime objects specify +// the time range for which AWS WAF actually returned a sample of web requests. +// AWS WAF gets the specified number of requests from among the first 5,000 +// requests that your AWS resource receives during the specified time period. 
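// Editor's note: a rough, client-side illustration of what the
// COMPRESS_WHITE_SPACE transformation described above does (replace \f, \t,
// \n, \r, \v, and non-breaking space with a space, then collapse runs of
// spaces). AWS WAF applies the real transformation server-side; this only
// makes the doc text concrete.
package main

import (
	"fmt"
	"strings"
)

func compressWhiteSpace(s string) string {
	// Map each listed whitespace character to a plain space (decimal 32).
	r := strings.NewReplacer(
		"\f", " ", "\t", " ", "\n", " ", "\r", " ", "\v", " ", "\u00A0", " ",
	)
	s = r.Replace(s)
	// Collapse multiple spaces into one.
	for strings.Contains(s, "  ") {
		s = strings.Replace(s, "  ", " ", -1)
	}
	return s
}

func main() {
	fmt.Printf("%q\n", compressWhiteSpace("1\tOR\n\n1 = 1")) // "1 OR 1 = 1"
}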
+// If your resource receives more than 5,000 requests during that period, AWS +// WAF stops sampling after the 5,000th request. In that case, EndTime is the +// time that AWS WAF received the 5,000th request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/TimeWindow +type TimeWindow struct { _ struct{} `type:"structure"` - // An array of ByteMatchSetSummary objects. - ByteMatchSets []*ByteMatchSetSummary `type:"list"` + // The end of the time range from which you want GetSampledRequests to return + // a sample of the requests that your AWS resource received. Specify the date + // and time in the following format: "2016-09-27T14:50Z". You can specify any + // time range in the previous three hours. + // + // EndTime is a required field + EndTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` - // If you have more ByteMatchSet objects than the number that you specified - // for Limit in the request, the response includes a NextMarker value. To list - // more ByteMatchSet objects, submit another ListByteMatchSets request, and - // specify the NextMarker value from the response in the NextMarker value in - // the next request. - NextMarker *string `min:"1" type:"string"` + // The beginning of the time range from which you want GetSampledRequests to + // return a sample of the requests that your AWS resource received. Specify + // the date and time in the following format: "2016-09-27T14:50Z". You can specify + // any time range in the previous three hours. + // + // StartTime is a required field + StartTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` } // String returns the string representation -func (s ListByteMatchSetsOutput) String() string { +func (s TimeWindow) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListByteMatchSetsOutput) GoString() string { +func (s TimeWindow) GoString() string { return s.String() } -// SetByteMatchSets sets the ByteMatchSets field's value. -func (s *ListByteMatchSetsOutput) SetByteMatchSets(v []*ByteMatchSetSummary) *ListByteMatchSetsOutput { - s.ByteMatchSets = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimeWindow) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimeWindow"} + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextMarker sets the NextMarker field's value. -func (s *ListByteMatchSetsOutput) SetNextMarker(v string) *ListByteMatchSetsOutput { - s.NextMarker = &v +// SetEndTime sets the EndTime field's value. +func (s *TimeWindow) SetEndTime(v time.Time) *TimeWindow { + s.EndTime = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *TimeWindow) SetStartTime(v time.Time) *TimeWindow { + s.StartTime = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSetsRequest -type ListIPSetsInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSetRequest +type UpdateByteMatchSetInput struct { _ struct{} `type:"structure"` - // Specifies the number of IPSet objects that you want AWS WAF to return for - // this request. 
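// Editor's note: an illustrative sketch (not part of the vendored SDK) of a
// TimeWindow that satisfies the "previous three hours" restriction documented
// above: the last 15 minutes, ending now.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	end := time.Now()
	window := &waf.TimeWindow{}
	window.SetStartTime(end.Add(-15 * time.Minute)).SetEndTime(end)

	if err := window.Validate(); err != nil {
		fmt.Println("invalid window:", err)
		return
	}
	fmt.Println(window)
}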
If you have more IPSet objects than the number you specify - // for Limit, the response includes a NextMarker value that you can use to get - // another batch of IPSet objects. - Limit *int64 `type:"integer"` + // The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId + // is returned by CreateByteMatchSet and by ListByteMatchSets. + // + // ByteMatchSetId is a required field + ByteMatchSetId *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more IPSets than the value - // of Limit, AWS WAF returns a NextMarker value in the response that allows - // you to list another group of IPSets. For the second and subsequent ListIPSets - // requests, specify the value of NextMarker from the previous response to get - // information about another batch of ByteMatchSets. - NextMarker *string `min:"1" type:"string"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` + + // An array of ByteMatchSetUpdate objects that you want to insert into or delete + // from a ByteMatchSet. For more information, see the applicable data types: + // + // * ByteMatchSetUpdate: Contains Action and ByteMatchTuple + // + // * ByteMatchTuple: Contains FieldToMatch, PositionalConstraint, TargetString, + // and TextTransformation + // + // * FieldToMatch: Contains Data and Type + // + // Updates is a required field + Updates []*ByteMatchSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListIPSetsInput) String() string { +func (s UpdateByteMatchSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIPSetsInput) GoString() string { +func (s UpdateByteMatchSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListIPSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListIPSetsInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateByteMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateByteMatchSetInput"} + if s.ByteMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) + } + if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) + } + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9278,86 +14616,119 @@ func (s *ListIPSetsInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListIPSetsInput) SetLimit(v int64) *ListIPSetsInput { - s.Limit = &v +// SetByteMatchSetId sets the ByteMatchSetId field's value. 
+func (s *UpdateByteMatchSetInput) SetByteMatchSetId(v string) *UpdateByteMatchSetInput { + s.ByteMatchSetId = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListIPSetsInput) SetNextMarker(v string) *ListIPSetsInput { - s.NextMarker = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateByteMatchSetInput) SetChangeToken(v string) *UpdateByteMatchSetInput { + s.ChangeToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListIPSetsResponse -type ListIPSetsOutput struct { - _ struct{} `type:"structure"` +// SetUpdates sets the Updates field's value. +func (s *UpdateByteMatchSetInput) SetUpdates(v []*ByteMatchSetUpdate) *UpdateByteMatchSetInput { + s.Updates = v + return s +} - // An array of IPSetSummary objects. - IPSets []*IPSetSummary `type:"list"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSetResponse +type UpdateByteMatchSetOutput struct { + _ struct{} `type:"structure"` - // If you have more IPSet objects than the number that you specified for Limit - // in the request, the response includes a NextMarker value. To list more IPSet - // objects, submit another ListIPSets request, and specify the NextMarker value - // from the response in the NextMarker value in the next request. - NextMarker *string `min:"1" type:"string"` + // The ChangeToken that you used to submit the UpdateByteMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListIPSetsOutput) String() string { +func (s UpdateByteMatchSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIPSetsOutput) GoString() string { +func (s UpdateByteMatchSetOutput) GoString() string { return s.String() } -// SetIPSets sets the IPSets field's value. -func (s *ListIPSetsOutput) SetIPSets(v []*IPSetSummary) *ListIPSetsOutput { - s.IPSets = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListIPSetsOutput) SetNextMarker(v string) *ListIPSetsOutput { - s.NextMarker = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateByteMatchSetOutput) SetChangeToken(v string) *UpdateByteMatchSetOutput { + s.ChangeToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRulesRequest -type ListRateBasedRulesInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateGeoMatchSetRequest +type UpdateGeoMatchSetInput struct { _ struct{} `type:"structure"` - // Specifies the number of Rules that you want AWS WAF to return for this request. - // If you have more Rules than the number that you specify for Limit, the response - // includes a NextMarker value that you can use to get another batch of Rules. - Limit *int64 `type:"integer"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more Rules than the value of - // Limit, AWS WAF returns a NextMarker value in the response that allows you - // to list another group of Rules. 
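// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// An UpdateByteMatchSetInput that inserts one ByteMatchTuple, following the
// ByteMatchSetUpdate -> ByteMatchTuple structure listed above. The set ID and
// change token are placeholders: real values come from CreateByteMatchSet /
// ListByteMatchSets and GetChangeToken. Assumes the aws and waf imports.
byteInput := &waf.UpdateByteMatchSetInput{
	ByteMatchSetId: aws.String("example-byte-match-set-id"), // placeholder
	ChangeToken:    aws.String("example-change-token"),      // placeholder
	Updates: []*waf.ByteMatchSetUpdate{{
		Action: aws.String("INSERT"),
		ByteMatchTuple: &waf.ByteMatchTuple{
			// Match requests whose User-Agent header contains "BadBot".
			FieldToMatch:         &waf.FieldToMatch{Type: aws.String("HEADER"), Data: aws.String("User-Agent")},
			PositionalConstraint: aws.String("CONTAINS"),
			TargetString:         []byte("BadBot"),
			TextTransformation:   aws.String("NONE"),
		},
	}},
}
// byteInput would then be passed to a *waf.WAF client's UpdateByteMatchSet method.
// --- End of editor's example. ---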
For the second and subsequent ListRateBasedRules - // requests, specify the value of NextMarker from the previous response to get - // information about another batch of Rules. - NextMarker *string `min:"1" type:"string"` + // The GeoMatchSetId of the GeoMatchSet that you want to update. GeoMatchSetId + // is returned by CreateGeoMatchSet and by ListGeoMatchSets. + // + // GeoMatchSetId is a required field + GeoMatchSetId *string `min:"1" type:"string" required:"true"` + + // An array of GeoMatchSetUpdate objects that you want to insert into or delete + // from an GeoMatchSet. For more information, see the applicable data types: + // + // * GeoMatchSetUpdate: Contains Action and GeoMatchConstraint + // + // * GeoMatchConstraint: Contains Type and Value + // + // You can have only one Type and Value per GeoMatchConstraint. To add multiple + // countries, include multiple GeoMatchSetUpdate objects in your request. + // + // Updates is a required field + Updates []*GeoMatchSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListRateBasedRulesInput) String() string { +func (s UpdateGeoMatchSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRateBasedRulesInput) GoString() string { +func (s UpdateGeoMatchSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListRateBasedRulesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListRateBasedRulesInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateGeoMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGeoMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.GeoMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("GeoMatchSetId")) + } + if s.GeoMatchSetId != nil && len(*s.GeoMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GeoMatchSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9366,86 +14737,116 @@ func (s *ListRateBasedRulesInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListRateBasedRulesInput) SetLimit(v int64) *ListRateBasedRulesInput { - s.Limit = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateGeoMatchSetInput) SetChangeToken(v string) *UpdateGeoMatchSetInput { + s.ChangeToken = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListRateBasedRulesInput) SetNextMarker(v string) *ListRateBasedRulesInput { - s.NextMarker = &v +// SetGeoMatchSetId sets the GeoMatchSetId field's value. 
+func (s *UpdateGeoMatchSetInput) SetGeoMatchSetId(v string) *UpdateGeoMatchSetInput { + s.GeoMatchSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRateBasedRulesResponse -type ListRateBasedRulesOutput struct { - _ struct{} `type:"structure"` +// SetUpdates sets the Updates field's value. +func (s *UpdateGeoMatchSetInput) SetUpdates(v []*GeoMatchSetUpdate) *UpdateGeoMatchSetInput { + s.Updates = v + return s +} - // If you have more Rules than the number that you specified for Limit in the - // request, the response includes a NextMarker value. To list more Rules, submit - // another ListRateBasedRules request, and specify the NextMarker value from - // the response in the NextMarker value in the next request. - NextMarker *string `min:"1" type:"string"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateGeoMatchSetResponse +type UpdateGeoMatchSetOutput struct { + _ struct{} `type:"structure"` - // An array of RuleSummary objects. - Rules []*RuleSummary `type:"list"` + // The ChangeToken that you used to submit the UpdateGeoMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListRateBasedRulesOutput) String() string { +func (s UpdateGeoMatchSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRateBasedRulesOutput) GoString() string { +func (s UpdateGeoMatchSetOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. -func (s *ListRateBasedRulesOutput) SetNextMarker(v string) *ListRateBasedRulesOutput { - s.NextMarker = &v - return s -} - -// SetRules sets the Rules field's value. -func (s *ListRateBasedRulesOutput) SetRules(v []*RuleSummary) *ListRateBasedRulesOutput { - s.Rules = v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateGeoMatchSetOutput) SetChangeToken(v string) *UpdateGeoMatchSetOutput { + s.ChangeToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRulesRequest -type ListRulesInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSetRequest +type UpdateIPSetInput struct { _ struct{} `type:"structure"` - // Specifies the number of Rules that you want AWS WAF to return for this request. - // If you have more Rules than the number that you specify for Limit, the response - // includes a NextMarker value that you can use to get another batch of Rules. - Limit *int64 `type:"integer"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more Rules than the value of - // Limit, AWS WAF returns a NextMarker value in the response that allows you - // to list another group of Rules. For the second and subsequent ListRules requests, - // specify the value of NextMarker from the previous response to get information - // about another batch of Rules. - NextMarker *string `min:"1" type:"string"` + // The IPSetId of the IPSet that you want to update. IPSetId is returned by + // CreateIPSet and by ListIPSets. 
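// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting one country into a GeoMatchSet. As noted above, each
// GeoMatchConstraint carries a single Type/Value pair, so matching several
// countries takes one GeoMatchSetUpdate per country. IDs and the change token
// are placeholders; assumes the aws and waf imports.
geoInput := &waf.UpdateGeoMatchSetInput{
	ChangeToken:   aws.String("example-change-token"),     // placeholder
	GeoMatchSetId: aws.String("example-geo-match-set-id"), // placeholder
	Updates: []*waf.GeoMatchSetUpdate{{
		Action: aws.String("INSERT"),
		GeoMatchConstraint: &waf.GeoMatchConstraint{
			Type:  aws.String("Country"),
			Value: aws.String("US"), // ISO 3166-1 alpha-2 country code
		},
	}},
}
// --- End of editor's example. ---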
+ // + // IPSetId is a required field + IPSetId *string `min:"1" type:"string" required:"true"` + + // An array of IPSetUpdate objects that you want to insert into or delete from + // an IPSet. For more information, see the applicable data types: + // + // * IPSetUpdate: Contains Action and IPSetDescriptor + // + // * IPSetDescriptor: Contains Type and Value + // + // Updates is a required field + Updates []*IPSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListRulesInput) String() string { +func (s UpdateIPSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRulesInput) GoString() string { +func (s UpdateIPSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListRulesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListRulesInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateIPSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateIPSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.IPSetId == nil { + invalidParams.Add(request.NewErrParamRequired("IPSetId")) + } + if s.IPSetId != nil && len(*s.IPSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9454,87 +14855,124 @@ func (s *ListRulesInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListRulesInput) SetLimit(v int64) *ListRulesInput { - s.Limit = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateIPSetInput) SetChangeToken(v string) *UpdateIPSetInput { + s.ChangeToken = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListRulesInput) SetNextMarker(v string) *ListRulesInput { - s.NextMarker = &v +// SetIPSetId sets the IPSetId field's value. +func (s *UpdateIPSetInput) SetIPSetId(v string) *UpdateIPSetInput { + s.IPSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListRulesResponse -type ListRulesOutput struct { - _ struct{} `type:"structure"` +// SetUpdates sets the Updates field's value. +func (s *UpdateIPSetInput) SetUpdates(v []*IPSetUpdate) *UpdateIPSetInput { + s.Updates = v + return s +} - // If you have more Rules than the number that you specified for Limit in the - // request, the response includes a NextMarker value. To list more Rules, submit - // another ListRules request, and specify the NextMarker value from the response - // in the NextMarker value in the next request. 
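// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting a single IPv4 address into an IPSet, following the IPSetUpdate ->
// IPSetDescriptor structure listed above; the /32 mask targets exactly one
// address. IDs and the change token are placeholders; assumes the aws and waf
// imports.
ipInput := &waf.UpdateIPSetInput{
	ChangeToken: aws.String("example-change-token"), // placeholder
	IPSetId:     aws.String("example-ip-set-id"),    // placeholder
	Updates: []*waf.IPSetUpdate{{
		Action: aws.String("INSERT"),
		IPSetDescriptor: &waf.IPSetDescriptor{
			Type:  aws.String("IPV4"),
			Value: aws.String("192.0.2.44/32"),
		},
	}},
}
// --- End of editor's example. ---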
- NextMarker *string `min:"1" type:"string"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSetResponse +type UpdateIPSetOutput struct { + _ struct{} `type:"structure"` - // An array of RuleSummary objects. - Rules []*RuleSummary `type:"list"` + // The ChangeToken that you used to submit the UpdateIPSet request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListRulesOutput) String() string { +func (s UpdateIPSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRulesOutput) GoString() string { +func (s UpdateIPSetOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. -func (s *ListRulesOutput) SetNextMarker(v string) *ListRulesOutput { - s.NextMarker = &v - return s -} - -// SetRules sets the Rules field's value. -func (s *ListRulesOutput) SetRules(v []*RuleSummary) *ListRulesOutput { - s.Rules = v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateIPSetOutput) SetChangeToken(v string) *UpdateIPSetOutput { + s.ChangeToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSetsRequest -type ListSizeConstraintSetsInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRuleRequest +type UpdateRateBasedRuleInput struct { _ struct{} `type:"structure"` - // Specifies the number of SizeConstraintSet objects that you want AWS WAF to - // return for this request. If you have more SizeConstraintSets objects than - // the number you specify for Limit, the response includes a NextMarker value - // that you can use to get another batch of SizeConstraintSet objects. - Limit *int64 `type:"integer"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more SizeConstraintSets than - // the value of Limit, AWS WAF returns a NextMarker value in the response that - // allows you to list another group of SizeConstraintSets. For the second and - // subsequent ListSizeConstraintSets requests, specify the value of NextMarker - // from the previous response to get information about another batch of SizeConstraintSets. - NextMarker *string `min:"1" type:"string"` + // The maximum number of requests, which have an identical value in the field + // specified by the RateKey, allowed in a five-minute period. If the number + // of requests exceeds the RateLimit and the other predicates specified in the + // rule are also met, AWS WAF triggers the action that is specified for this + // rule. + // + // RateLimit is a required field + RateLimit *int64 `min:"2000" type:"long" required:"true"` + + // The RuleId of the RateBasedRule that you want to update. RuleId is returned + // by CreateRateBasedRule and by ListRateBasedRules. + // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` + + // An array of RuleUpdate objects that you want to insert into or delete from + // a RateBasedRule. 
+ // + // Updates is a required field + Updates []*RuleUpdate `type:"list" required:"true"` } // String returns the string representation -func (s ListSizeConstraintSetsInput) String() string { +func (s UpdateRateBasedRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSizeConstraintSetsInput) GoString() string { +func (s UpdateRateBasedRuleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListSizeConstraintSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListSizeConstraintSetsInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateRateBasedRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRateBasedRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RateLimit == nil { + invalidParams.Add(request.NewErrParamRequired("RateLimit")) + } + if s.RateLimit != nil && *s.RateLimit < 2000 { + invalidParams.Add(request.NewErrParamMinValue("RateLimit", 2000)) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9543,90 +14981,118 @@ func (s *ListSizeConstraintSetsInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListSizeConstraintSetsInput) SetLimit(v int64) *ListSizeConstraintSetsInput { - s.Limit = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRateBasedRuleInput) SetChangeToken(v string) *UpdateRateBasedRuleInput { + s.ChangeToken = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListSizeConstraintSetsInput) SetNextMarker(v string) *ListSizeConstraintSetsInput { - s.NextMarker = &v +// SetRateLimit sets the RateLimit field's value. +func (s *UpdateRateBasedRuleInput) SetRateLimit(v int64) *UpdateRateBasedRuleInput { + s.RateLimit = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSizeConstraintSetsResponse -type ListSizeConstraintSetsOutput struct { - _ struct{} `type:"structure"` +// SetRuleId sets the RuleId field's value. +func (s *UpdateRateBasedRuleInput) SetRuleId(v string) *UpdateRateBasedRuleInput { + s.RuleId = &v + return s +} - // If you have more SizeConstraintSet objects than the number that you specified - // for Limit in the request, the response includes a NextMarker value. To list - // more SizeConstraintSet objects, submit another ListSizeConstraintSets request, - // and specify the NextMarker value from the response in the NextMarker value - // in the next request. - NextMarker *string `min:"1" type:"string"` +// SetUpdates sets the Updates field's value. 
+func (s *UpdateRateBasedRuleInput) SetUpdates(v []*RuleUpdate) *UpdateRateBasedRuleInput { + s.Updates = v + return s +} - // An array of SizeConstraintSetSummary objects. - SizeConstraintSets []*SizeConstraintSetSummary `type:"list"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRuleResponse +type UpdateRateBasedRuleOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateRateBasedRule request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListSizeConstraintSetsOutput) String() string { +func (s UpdateRateBasedRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSizeConstraintSetsOutput) GoString() string { +func (s UpdateRateBasedRuleOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. -func (s *ListSizeConstraintSetsOutput) SetNextMarker(v string) *ListSizeConstraintSetsOutput { - s.NextMarker = &v - return s -} - -// SetSizeConstraintSets sets the SizeConstraintSets field's value. -func (s *ListSizeConstraintSetsOutput) SetSizeConstraintSets(v []*SizeConstraintSetSummary) *ListSizeConstraintSetsOutput { - s.SizeConstraintSets = v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRateBasedRuleOutput) SetChangeToken(v string) *UpdateRateBasedRuleOutput { + s.ChangeToken = &v return s } -// A request to list the SqlInjectionMatchSet objects created by the current -// AWS account. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSetsRequest -type ListSqlInjectionMatchSetsInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexMatchSetRequest +type UpdateRegexMatchSetInput struct { _ struct{} `type:"structure"` - // Specifies the number of SqlInjectionMatchSet objects that you want AWS WAF - // to return for this request. If you have more SqlInjectionMatchSet objects - // than the number you specify for Limit, the response includes a NextMarker - // value that you can use to get another batch of Rules. - Limit *int64 `type:"integer"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more SqlInjectionMatchSet objects - // than the value of Limit, AWS WAF returns a NextMarker value in the response - // that allows you to list another group of SqlInjectionMatchSets. For the second - // and subsequent ListSqlInjectionMatchSets requests, specify the value of NextMarker - // from the previous response to get information about another batch of SqlInjectionMatchSets. - NextMarker *string `min:"1" type:"string"` + // The RegexMatchSetId of the RegexMatchSet that you want to update. RegexMatchSetId + // is returned by CreateRegexMatchSet and by ListRegexMatchSets. + // + // RegexMatchSetId is a required field + RegexMatchSetId *string `min:"1" type:"string" required:"true"` + + // An array of RegexMatchSetUpdate objects that you want to insert into or delete + // from a RegexMatchSet. For more information, see RegexMatchTuple. 
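// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Updating a rate-based rule: adding an IPMatch predicate and setting the
// RateLimit to the documented minimum of 2000 requests per five-minute period.
// IDs and the change token are placeholders; assumes the aws and waf imports.
rateInput := &waf.UpdateRateBasedRuleInput{
	ChangeToken: aws.String("example-change-token"),       // placeholder
	RuleId:      aws.String("example-rate-based-rule-id"), // placeholder
	RateLimit:   aws.Int64(2000),                          // Validate rejects values below 2000
	Updates: []*waf.RuleUpdate{{
		Action: aws.String("INSERT"),
		Predicate: &waf.Predicate{
			DataId:  aws.String("example-ip-set-id"), // placeholder IPSet ID
			Negated: aws.Bool(false),
			Type:    aws.String("IPMatch"),
		},
	}},
}
// --- End of editor's example. ---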
+ // + // Updates is a required field + Updates []*RegexMatchSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListSqlInjectionMatchSetsInput) String() string { +func (s UpdateRegexMatchSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSqlInjectionMatchSetsInput) GoString() string { +func (s UpdateRegexMatchSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListSqlInjectionMatchSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListSqlInjectionMatchSetsInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateRegexMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRegexMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RegexMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexMatchSetId")) + } + if s.RegexMatchSetId != nil && len(*s.RegexMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexMatchSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9635,90 +15101,112 @@ func (s *ListSqlInjectionMatchSetsInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListSqlInjectionMatchSetsInput) SetLimit(v int64) *ListSqlInjectionMatchSetsInput { - s.Limit = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRegexMatchSetInput) SetChangeToken(v string) *UpdateRegexMatchSetInput { + s.ChangeToken = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListSqlInjectionMatchSetsInput) SetNextMarker(v string) *ListSqlInjectionMatchSetsInput { - s.NextMarker = &v +// SetRegexMatchSetId sets the RegexMatchSetId field's value. +func (s *UpdateRegexMatchSetInput) SetRegexMatchSetId(v string) *UpdateRegexMatchSetInput { + s.RegexMatchSetId = &v return s } -// The response to a ListSqlInjectionMatchSets request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListSqlInjectionMatchSetsResponse -type ListSqlInjectionMatchSetsOutput struct { - _ struct{} `type:"structure"` +// SetUpdates sets the Updates field's value. +func (s *UpdateRegexMatchSetInput) SetUpdates(v []*RegexMatchSetUpdate) *UpdateRegexMatchSetInput { + s.Updates = v + return s +} - // If you have more SqlInjectionMatchSet objects than the number that you specified - // for Limit in the request, the response includes a NextMarker value. To list - // more SqlInjectionMatchSet objects, submit another ListSqlInjectionMatchSets - // request, and specify the NextMarker value from the response in the NextMarker - // value in the next request. 
- NextMarker *string `min:"1" type:"string"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexMatchSetResponse +type UpdateRegexMatchSetOutput struct { + _ struct{} `type:"structure"` - // An array of SqlInjectionMatchSetSummary objects. - SqlInjectionMatchSets []*SqlInjectionMatchSetSummary `type:"list"` + // The ChangeToken that you used to submit the UpdateRegexMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListSqlInjectionMatchSetsOutput) String() string { +func (s UpdateRegexMatchSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSqlInjectionMatchSetsOutput) GoString() string { +func (s UpdateRegexMatchSetOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. -func (s *ListSqlInjectionMatchSetsOutput) SetNextMarker(v string) *ListSqlInjectionMatchSetsOutput { - s.NextMarker = &v - return s -} - -// SetSqlInjectionMatchSets sets the SqlInjectionMatchSets field's value. -func (s *ListSqlInjectionMatchSetsOutput) SetSqlInjectionMatchSets(v []*SqlInjectionMatchSetSummary) *ListSqlInjectionMatchSetsOutput { - s.SqlInjectionMatchSets = v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRegexMatchSetOutput) SetChangeToken(v string) *UpdateRegexMatchSetOutput { + s.ChangeToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLsRequest -type ListWebACLsInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexPatternSetRequest +type UpdateRegexPatternSetInput struct { _ struct{} `type:"structure"` - // Specifies the number of WebACL objects that you want AWS WAF to return for - // this request. If you have more WebACL objects than the number that you specify - // for Limit, the response includes a NextMarker value that you can use to get - // another batch of WebACL objects. - Limit *int64 `type:"integer"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more WebACL objects than the - // number that you specify for Limit, AWS WAF returns a NextMarker value in - // the response that allows you to list another group of WebACL objects. For - // the second and subsequent ListWebACLs requests, specify the value of NextMarker - // from the previous response to get information about another batch of WebACL - // objects. - NextMarker *string `min:"1" type:"string"` + // The RegexPatternSetId of the RegexPatternSet that you want to update. RegexPatternSetId + // is returned by CreateRegexPatternSet and by ListRegexPatternSets. + // + // RegexPatternSetId is a required field + RegexPatternSetId *string `min:"1" type:"string" required:"true"` + + // An array of RegexPatternSetUpdate objects that you want to insert into or + // delete from a RegexPatternSet. 
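// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting a RegexMatchTuple that applies an existing RegexPatternSet to the
// request URI; this assumes RegexMatchTuple carries a FieldToMatch, a
// TextTransformation, and the RegexPatternSetId to match against. IDs and the
// change token are placeholders; assumes the aws and waf imports.
regexMatchInput := &waf.UpdateRegexMatchSetInput{
	ChangeToken:     aws.String("example-change-token"),       // placeholder
	RegexMatchSetId: aws.String("example-regex-match-set-id"), // placeholder
	Updates: []*waf.RegexMatchSetUpdate{{
		Action: aws.String("INSERT"),
		RegexMatchTuple: &waf.RegexMatchTuple{
			FieldToMatch:       &waf.FieldToMatch{Type: aws.String("URI")},
			RegexPatternSetId:  aws.String("example-regex-pattern-set-id"), // placeholder
			TextTransformation: aws.String("NONE"),
		},
	}},
}
// --- End of editor's example. ---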
+ // + // Updates is a required field + Updates []*RegexPatternSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListWebACLsInput) String() string { +func (s UpdateRegexPatternSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListWebACLsInput) GoString() string { +func (s UpdateRegexPatternSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListWebACLsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListWebACLsInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateRegexPatternSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRegexPatternSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RegexPatternSetId == nil { + invalidParams.Add(request.NewErrParamRequired("RegexPatternSetId")) + } + if s.RegexPatternSetId != nil && len(*s.RegexPatternSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RegexPatternSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9727,88 +15215,115 @@ func (s *ListWebACLsInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListWebACLsInput) SetLimit(v int64) *ListWebACLsInput { - s.Limit = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRegexPatternSetInput) SetChangeToken(v string) *UpdateRegexPatternSetInput { + s.ChangeToken = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListWebACLsInput) SetNextMarker(v string) *ListWebACLsInput { - s.NextMarker = &v +// SetRegexPatternSetId sets the RegexPatternSetId field's value. +func (s *UpdateRegexPatternSetInput) SetRegexPatternSetId(v string) *UpdateRegexPatternSetInput { + s.RegexPatternSetId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListWebACLsResponse -type ListWebACLsOutput struct { - _ struct{} `type:"structure"` +// SetUpdates sets the Updates field's value. +func (s *UpdateRegexPatternSetInput) SetUpdates(v []*RegexPatternSetUpdate) *UpdateRegexPatternSetInput { + s.Updates = v + return s +} - // If you have more WebACL objects than the number that you specified for Limit - // in the request, the response includes a NextMarker value. To list more WebACL - // objects, submit another ListWebACLs request, and specify the NextMarker value - // from the response in the NextMarker value in the next request. - NextMarker *string `min:"1" type:"string"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRegexPatternSetResponse +type UpdateRegexPatternSetOutput struct { + _ struct{} `type:"structure"` - // An array of WebACLSummary objects. 
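// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting a regex pattern into a RegexPatternSet; this assumes
// RegexPatternSetUpdate pairs an Action with a RegexPatternString. IDs and the
// change token are placeholders; assumes the aws and waf imports.
regexPatternInput := &waf.UpdateRegexPatternSetInput{
	ChangeToken:       aws.String("example-change-token"),         // placeholder
	RegexPatternSetId: aws.String("example-regex-pattern-set-id"), // placeholder
	Updates: []*waf.RegexPatternSetUpdate{{
		Action:             aws.String("INSERT"),
		RegexPatternString: aws.String("B[a@]dB[o0]t"),
	}},
}
// --- End of editor's example. ---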
- WebACLs []*WebACLSummary `type:"list"` + // The ChangeToken that you used to submit the UpdateRegexPatternSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListWebACLsOutput) String() string { +func (s UpdateRegexPatternSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListWebACLsOutput) GoString() string { +func (s UpdateRegexPatternSetOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. -func (s *ListWebACLsOutput) SetNextMarker(v string) *ListWebACLsOutput { - s.NextMarker = &v - return s -} - -// SetWebACLs sets the WebACLs field's value. -func (s *ListWebACLsOutput) SetWebACLs(v []*WebACLSummary) *ListWebACLsOutput { - s.WebACLs = v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRegexPatternSetOutput) SetChangeToken(v string) *UpdateRegexPatternSetOutput { + s.ChangeToken = &v return s } -// A request to list the XssMatchSet objects created by the current AWS account. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSetsRequest -type ListXssMatchSetsInput struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRuleRequest +type UpdateRuleInput struct { _ struct{} `type:"structure"` - // Specifies the number of XssMatchSet objects that you want AWS WAF to return - // for this request. If you have more XssMatchSet objects than the number you - // specify for Limit, the response includes a NextMarker value that you can - // use to get another batch of Rules. - Limit *int64 `type:"integer"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // If you specify a value for Limit and you have more XssMatchSet objects than - // the value of Limit, AWS WAF returns a NextMarker value in the response that - // allows you to list another group of XssMatchSets. For the second and subsequent - // ListXssMatchSets requests, specify the value of NextMarker from the previous - // response to get information about another batch of XssMatchSets. - NextMarker *string `min:"1" type:"string"` + // The RuleId of the Rule that you want to update. RuleId is returned by CreateRule + // and by ListRules. + // + // RuleId is a required field + RuleId *string `min:"1" type:"string" required:"true"` + + // An array of RuleUpdate objects that you want to insert into or delete from + // a Rule. For more information, see the applicable data types: + // + // * RuleUpdate: Contains Action and Predicate + // + // * Predicate: Contains DataId, Negated, and Type + // + // * FieldToMatch: Contains Data and Type + // + // Updates is a required field + Updates []*RuleUpdate `type:"list" required:"true"` } // String returns the string representation -func (s ListXssMatchSetsInput) String() string { +func (s UpdateRuleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListXssMatchSetsInput) GoString() string { +func (s UpdateRuleInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListXssMatchSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListXssMatchSetsInput"} - if s.NextMarker != nil && len(*s.NextMarker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextMarker", 1)) +func (s *UpdateRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRuleInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.RuleId == nil { + invalidParams.Add(request.NewErrParamRequired("RuleId")) + } + if s.RuleId != nil && len(*s.RuleId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9817,115 +15332,120 @@ func (s *ListXssMatchSetsInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListXssMatchSetsInput) SetLimit(v int64) *ListXssMatchSetsInput { - s.Limit = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRuleInput) SetChangeToken(v string) *UpdateRuleInput { + s.ChangeToken = &v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *ListXssMatchSetsInput) SetNextMarker(v string) *ListXssMatchSetsInput { - s.NextMarker = &v +// SetRuleId sets the RuleId field's value. +func (s *UpdateRuleInput) SetRuleId(v string) *UpdateRuleInput { + s.RuleId = &v return s } -// The response to a ListXssMatchSets request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/ListXssMatchSetsResponse -type ListXssMatchSetsOutput struct { - _ struct{} `type:"structure"` +// SetUpdates sets the Updates field's value. +func (s *UpdateRuleInput) SetUpdates(v []*RuleUpdate) *UpdateRuleInput { + s.Updates = v + return s +} - // If you have more XssMatchSet objects than the number that you specified for - // Limit in the request, the response includes a NextMarker value. To list more - // XssMatchSet objects, submit another ListXssMatchSets request, and specify - // the NextMarker value from the response in the NextMarker value in the next - // request. - NextMarker *string `min:"1" type:"string"` +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRuleResponse +type UpdateRuleOutput struct { + _ struct{} `type:"structure"` - // An array of XssMatchSetSummary objects. - XssMatchSets []*XssMatchSetSummary `type:"list"` + // The ChangeToken that you used to submit the UpdateRule request. You can also + // use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListXssMatchSetsOutput) String() string { +func (s UpdateRuleOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListXssMatchSetsOutput) GoString() string { +func (s UpdateRuleOutput) GoString() string { return s.String() } -// SetNextMarker sets the NextMarker field's value. 
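// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting a ByteMatch predicate into an existing Rule, following the
// RuleUpdate -> Predicate structure listed above. IDs and the change token are
// placeholders; assumes the aws and waf imports.
ruleInput := &waf.UpdateRuleInput{
	ChangeToken: aws.String("example-change-token"), // placeholder
	RuleId:      aws.String("example-rule-id"),      // placeholder
	Updates: []*waf.RuleUpdate{{
		Action: aws.String("INSERT"),
		Predicate: &waf.Predicate{
			DataId:  aws.String("example-byte-match-set-id"), // placeholder ByteMatchSet ID
			Negated: aws.Bool(false),                         // match, rather than negate, the set
			Type:    aws.String("ByteMatch"),
		},
	}},
}
// --- End of editor's example. ---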
-func (s *ListXssMatchSetsOutput) SetNextMarker(v string) *ListXssMatchSetsOutput { - s.NextMarker = &v - return s -} - -// SetXssMatchSets sets the XssMatchSets field's value. -func (s *ListXssMatchSetsOutput) SetXssMatchSets(v []*XssMatchSetSummary) *ListXssMatchSetsOutput { - s.XssMatchSets = v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateRuleOutput) SetChangeToken(v string) *UpdateRuleOutput { + s.ChangeToken = &v return s } -// Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, and -// SizeConstraintSet objects that you want to add to a Rule and, for each object, -// indicates whether you want to negate the settings, for example, requests -// that do NOT originate from the IP address 192.0.2.44. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/Predicate -type Predicate struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSetRequest +type UpdateSizeConstraintSetInput struct { _ struct{} `type:"structure"` - // A unique identifier for a predicate in a Rule, such as ByteMatchSetId or - // IPSetId. The ID is returned by the corresponding Create or List command. + // The value returned by the most recent call to GetChangeToken. // - // DataId is a required field - DataId *string `min:"1" type:"string" required:"true"` + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // Set Negated to False if you want AWS WAF to allow, block, or count requests - // based on the settings in the specified ByteMatchSet, IPSet, SqlInjectionMatchSet, - // XssMatchSet, or SizeConstraintSet. For example, if an IPSet includes the - // IP address 192.0.2.44, AWS WAF will allow or block requests based on that - // IP address. - // - // Set Negated to True if you want AWS WAF to allow or block a request based - // on the negation of the settings in the ByteMatchSet, IPSet, SqlInjectionMatchSet, - // XssMatchSet, or SizeConstraintSet. For example, if an IPSet includes the - // IP address 192.0.2.44, AWS WAF will allow, block, or count requests based - // on all IP addresses except192.0.2.44. + // The SizeConstraintSetId of the SizeConstraintSet that you want to update. + // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. // - // Negated is a required field - Negated *bool `type:"boolean" required:"true"` + // SizeConstraintSetId is a required field + SizeConstraintSetId *string `min:"1" type:"string" required:"true"` - // The type of predicate in a Rule, such as ByteMatchSet or IPSet. + // An array of SizeConstraintSetUpdate objects that you want to insert into + // or delete from a SizeConstraintSet. 
For more information, see the applicable + // data types: // - // Type is a required field - Type *string `type:"string" required:"true" enum:"PredicateType"` + // * SizeConstraintSetUpdate: Contains Action and SizeConstraint + // + // * SizeConstraint: Contains FieldToMatch, TextTransformation, ComparisonOperator, + // and Size + // + // * FieldToMatch: Contains Data and Type + // + // Updates is a required field + Updates []*SizeConstraintSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s Predicate) String() string { +func (s UpdateSizeConstraintSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Predicate) GoString() string { +func (s UpdateSizeConstraintSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Predicate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Predicate"} - if s.DataId == nil { - invalidParams.Add(request.NewErrParamRequired("DataId")) +func (s *UpdateSizeConstraintSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSizeConstraintSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) } - if s.DataId != nil && len(*s.DataId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataId", 1)) + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) } - if s.Negated == nil { - invalidParams.Add(request.NewErrParamRequired("Negated")) + if s.SizeConstraintSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) + if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -9934,292 +15454,371 @@ func (s *Predicate) Validate() error { return nil } -// SetDataId sets the DataId field's value. -func (s *Predicate) SetDataId(v string) *Predicate { - s.DataId = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateSizeConstraintSetInput) SetChangeToken(v string) *UpdateSizeConstraintSetInput { + s.ChangeToken = &v return s } -// SetNegated sets the Negated field's value. -func (s *Predicate) SetNegated(v bool) *Predicate { - s.Negated = &v +// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. +func (s *UpdateSizeConstraintSetInput) SetSizeConstraintSetId(v string) *UpdateSizeConstraintSetInput { + s.SizeConstraintSetId = &v return s } -// SetType sets the Type field's value. -func (s *Predicate) SetType(v string) *Predicate { - s.Type = &v +// SetUpdates sets the Updates field's value. 
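// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting a SizeConstraint that flags request bodies larger than 8 KB,
// following the SizeConstraintSetUpdate -> SizeConstraint structure listed
// above. IDs and the change token are placeholders; assumes the aws and waf
// imports.
sizeInput := &waf.UpdateSizeConstraintSetInput{
	ChangeToken:         aws.String("example-change-token"),           // placeholder
	SizeConstraintSetId: aws.String("example-size-constraint-set-id"), // placeholder
	Updates: []*waf.SizeConstraintSetUpdate{{
		Action: aws.String("INSERT"),
		SizeConstraint: &waf.SizeConstraint{
			ComparisonOperator: aws.String("GT"),
			FieldToMatch:       &waf.FieldToMatch{Type: aws.String("BODY")},
			Size:               aws.Int64(8192), // bytes
			TextTransformation: aws.String("NONE"),
		},
	}},
}
// --- End of editor's example. ---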
+func (s *UpdateSizeConstraintSetInput) SetUpdates(v []*SizeConstraintSetUpdate) *UpdateSizeConstraintSetInput { + s.Updates = v return s } -// A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule -// counts the number of requests that arrive from a specified IP address every -// five minutes. For example, based on recent requests that you've seen from -// an attacker, you might create a RateBasedRule that includes the following -// conditions: -// -// * The requests come from 192.0.2.44. -// -// * They contain the value BadBot in the User-Agent header. -// -// In the rule, you also define the rate limit as 15,000. -// -// Requests that meet both of these conditions and exceed 15,000 requests every -// five minutes trigger the rule's action (block or count), which is defined -// in the web ACL. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RateBasedRule -type RateBasedRule struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSetResponse +type UpdateSizeConstraintSetOutput struct { _ struct{} `type:"structure"` - // The Predicates object contains one Predicate element for each ByteMatchSet, - // IPSet, or SqlInjectionMatchSet object that you want to include in a RateBasedRule. - // - // MatchPredicates is a required field - MatchPredicates []*Predicate `type:"list" required:"true"` + // The ChangeToken that you used to submit the UpdateSizeConstraintSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} - // A friendly name or description for the metrics for a RateBasedRule. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change the name of the metric after you create - // the RateBasedRule. - MetricName *string `type:"string"` +// String returns the string representation +func (s UpdateSizeConstraintSetOutput) String() string { + return awsutil.Prettify(s) +} - // A friendly name or description for a RateBasedRule. You can't change the - // name of a RateBasedRule after you create it. - Name *string `min:"1" type:"string"` +// GoString returns the string representation +func (s UpdateSizeConstraintSetOutput) GoString() string { + return s.String() +} - // The field that AWS WAF uses to determine if requests are likely arriving - // from single source and thus subject to rate monitoring. The only valid value - // for RateKey is IP. IP indicates that requests arriving from the same IP address - // are subject to the RateLimit that is specified in the RateBasedRule. +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateSizeConstraintSetOutput) SetChangeToken(v string) *UpdateSizeConstraintSetOutput { + s.ChangeToken = &v + return s +} + +// A request to update a SqlInjectionMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSetRequest +type UpdateSqlInjectionMatchSetInput struct { + _ struct{} `type:"structure"` + + // The value returned by the most recent call to GetChangeToken. 
// - // RateKey is a required field - RateKey *string `type:"string" required:"true" enum:"RateKey"` + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // The maximum number of requests, which have an identical value in the field - // specified by the RateKey, allowed in a five-minute period. If the number - // of requests exceeds the RateLimit and the other predicates specified in the - // rule are also met, AWS WAF triggers the action that is specified for this - // rule. + // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. + // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. // - // RateLimit is a required field - RateLimit *int64 `min:"2000" type:"long" required:"true"` + // SqlInjectionMatchSetId is a required field + SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` - // A unique identifier for a RateBasedRule. You use RuleId to get more information - // about a RateBasedRule (see GetRateBasedRule), update a RateBasedRule (see - // UpdateRateBasedRule), insert a RateBasedRule into a WebACL or delete one - // from a WebACL (see UpdateWebACL), or delete a RateBasedRule from AWS WAF - // (see DeleteRateBasedRule). + // An array of SqlInjectionMatchSetUpdate objects that you want to insert into + // or delete from a SqlInjectionMatchSet. For more information, see the applicable + // data types: // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // * SqlInjectionMatchSetUpdate: Contains Action and SqlInjectionMatchTuple + // + // * SqlInjectionMatchTuple: Contains FieldToMatch and TextTransformation + // + // * FieldToMatch: Contains Data and Type + // + // Updates is a required field + Updates []*SqlInjectionMatchSetUpdate `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s RateBasedRule) String() string { +func (s UpdateSqlInjectionMatchSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RateBasedRule) GoString() string { +func (s UpdateSqlInjectionMatchSetInput) GoString() string { return s.String() } -// SetMatchPredicates sets the MatchPredicates field's value. -func (s *RateBasedRule) SetMatchPredicates(v []*Predicate) *RateBasedRule { - s.MatchPredicates = v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateSqlInjectionMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSqlInjectionMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.SqlInjectionMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) + } + if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) + } + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetMetricName sets the MetricName field's value. -func (s *RateBasedRule) SetMetricName(v string) *RateBasedRule { - s.MetricName = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateSqlInjectionMatchSetInput) SetChangeToken(v string) *UpdateSqlInjectionMatchSetInput { + s.ChangeToken = &v return s } -// SetName sets the Name field's value. -func (s *RateBasedRule) SetName(v string) *RateBasedRule { - s.Name = &v +// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. +func (s *UpdateSqlInjectionMatchSetInput) SetSqlInjectionMatchSetId(v string) *UpdateSqlInjectionMatchSetInput { + s.SqlInjectionMatchSetId = &v return s } -// SetRateKey sets the RateKey field's value. -func (s *RateBasedRule) SetRateKey(v string) *RateBasedRule { - s.RateKey = &v +// SetUpdates sets the Updates field's value. +func (s *UpdateSqlInjectionMatchSetInput) SetUpdates(v []*SqlInjectionMatchSetUpdate) *UpdateSqlInjectionMatchSetInput { + s.Updates = v return s } -// SetRateLimit sets the RateLimit field's value. -func (s *RateBasedRule) SetRateLimit(v int64) *RateBasedRule { - s.RateLimit = &v - return s +// The response to an UpdateSqlInjectionMatchSets request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSetResponse +type UpdateSqlInjectionMatchSetOutput struct { + _ struct{} `type:"structure"` + + // The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. + // You can also use this value to query the status of the request. For more + // information, see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateSqlInjectionMatchSetOutput) String() string { + return awsutil.Prettify(s) } -// SetRuleId sets the RuleId field's value. -func (s *RateBasedRule) SetRuleId(v string) *RateBasedRule { - s.RuleId = &v +// GoString returns the string representation +func (s UpdateSqlInjectionMatchSetOutput) GoString() string { + return s.String() +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateSqlInjectionMatchSetOutput) SetChangeToken(v string) *UpdateSqlInjectionMatchSetOutput { + s.ChangeToken = &v return s } -// A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects -// that identify the web requests that you want to allow, block, or count. 
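// --- Editor's example (illustrative sketch; not part of the vendored diff) ---
// Inserting the kind of SqlInjectionMatchTuple sketched earlier into a
// SqlInjectionMatchSet, following the SqlInjectionMatchSetUpdate structure
// listed above. IDs and the change token are placeholders; assumes the aws
// and waf imports.
sqliInput := &waf.UpdateSqlInjectionMatchSetInput{
	ChangeToken:            aws.String("example-change-token"),               // placeholder
	SqlInjectionMatchSetId: aws.String("example-sql-injection-match-set-id"), // placeholder
	Updates: []*waf.SqlInjectionMatchSetUpdate{{
		Action: aws.String("INSERT"),
		SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
			FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
			TextTransformation: aws.String("URL_DECODE"),
		},
	}},
}
// --- End of editor's example. ---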
For -// example, you might create a Rule that includes the following predicates: -// -// * An IPSet that causes AWS WAF to search for web requests that originate -// from the IP address 192.0.2.44 -// -// * A ByteMatchSet that causes AWS WAF to search for web requests for which -// the value of the User-Agent header is BadBot. -// -// To match the settings in this Rule, a request must originate from 192.0.2.44 -// AND include a User-Agent header for which the value is BadBot. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/Rule -type Rule struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACLRequest +type UpdateWebACLInput struct { _ struct{} `type:"structure"` - // A friendly name or description for the metrics for this Rule. The name can - // contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain - // whitespace. You can't change MetricName after you create the Rule. - MetricName *string `type:"string"` + // The value returned by the most recent call to GetChangeToken. + // + // ChangeToken is a required field + ChangeToken *string `min:"1" type:"string" required:"true"` - // The friendly name or description for the Rule. You can't change the name - // of a Rule after you create it. - Name *string `min:"1" type:"string"` + // A default action for the web ACL, either ALLOW or BLOCK. AWS WAF performs + // the default action if a request doesn't match the criteria in any of the + // rules in a web ACL. + DefaultAction *WafAction `type:"structure"` - // The Predicates object contains one Predicate element for each ByteMatchSet, - // IPSet, or SqlInjectionMatchSet object that you want to include in a Rule. + // An array of updates to make to the WebACL. // - // Predicates is a required field - Predicates []*Predicate `type:"list" required:"true"` - - // A unique identifier for a Rule. You use RuleId to get more information about - // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into - // a WebACL or delete a one from a WebACL (see UpdateWebACL), or delete a Rule - // from AWS WAF (see DeleteRule). + // An array of WebACLUpdate objects that you want to insert into or delete from + // a WebACL. For more information, see the applicable data types: // - // RuleId is returned by CreateRule and by ListRules. + // * WebACLUpdate: Contains Action and ActivatedRule // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // * ActivatedRule: Contains Action, Priority, RuleId, and Type + // + // * WafAction: Contains Type + Updates []*WebACLUpdate `type:"list"` + + // The WebACLId of the WebACL that you want to update. WebACLId is returned + // by CreateWebACL and by ListWebACLs. + // + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Rule) String() string { +func (s UpdateWebACLInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Rule) GoString() string { +func (s UpdateWebACLInput) GoString() string { return s.String() } -// SetMetricName sets the MetricName field's value. -func (s *Rule) SetMetricName(v string) *Rule { - s.MetricName = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateWebACLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateWebACLInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) + } + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) + } + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + if s.DefaultAction != nil { + if err := s.DefaultAction.Validate(); err != nil { + invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) + } + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateWebACLInput) SetChangeToken(v string) *UpdateWebACLInput { + s.ChangeToken = &v return s } -// SetName sets the Name field's value. -func (s *Rule) SetName(v string) *Rule { - s.Name = &v +// SetDefaultAction sets the DefaultAction field's value. +func (s *UpdateWebACLInput) SetDefaultAction(v *WafAction) *UpdateWebACLInput { + s.DefaultAction = v return s } -// SetPredicates sets the Predicates field's value. -func (s *Rule) SetPredicates(v []*Predicate) *Rule { - s.Predicates = v +// SetUpdates sets the Updates field's value. +func (s *UpdateWebACLInput) SetUpdates(v []*WebACLUpdate) *UpdateWebACLInput { + s.Updates = v return s } -// SetRuleId sets the RuleId field's value. -func (s *Rule) SetRuleId(v string) *Rule { - s.RuleId = &v +// SetWebACLId sets the WebACLId field's value. +func (s *UpdateWebACLInput) SetWebACLId(v string) *UpdateWebACLInput { + s.WebACLId = &v return s } -// Contains the identifier and the friendly name or description of the Rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RuleSummary -type RuleSummary struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACLResponse +type UpdateWebACLOutput struct { _ struct{} `type:"structure"` - // A friendly name or description of the Rule. You can't change the name of - // a Rule after you create it. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A unique identifier for a Rule. You use RuleId to get more information about - // a Rule (see GetRule), update a Rule (see UpdateRule), insert a Rule into - // a WebACL or delete one from a WebACL (see UpdateWebACL), or delete a Rule - // from AWS WAF (see DeleteRule). - // - // RuleId is returned by CreateRule and by ListRules. - // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // The ChangeToken that you used to submit the UpdateWebACL request. You can + // also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. 
+ ChangeToken *string `min:"1" type:"string"`
}

// String returns the string representation
-func (s RuleSummary) String() string {
+func (s UpdateWebACLOutput) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s RuleSummary) GoString() string {
+func (s UpdateWebACLOutput) GoString() string {
 return s.String()
}

-// SetName sets the Name field's value.
-func (s *RuleSummary) SetName(v string) *RuleSummary {
- s.Name = &v
- return s
-}
-
-// SetRuleId sets the RuleId field's value.
-func (s *RuleSummary) SetRuleId(v string) *RuleSummary {
- s.RuleId = &v
+// SetChangeToken sets the ChangeToken field's value.
+func (s *UpdateWebACLOutput) SetChangeToken(v string) *UpdateWebACLOutput {
+ s.ChangeToken = &v
 return s
}

-// Specifies a Predicate (such as an IPSet) and indicates whether you want to
-// add it to a Rule or delete it from a Rule.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/RuleUpdate
-type RuleUpdate struct {
+// A request to update an XssMatchSet.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSetRequest
+type UpdateXssMatchSetInput struct {
 _ struct{} `type:"structure"`

- // Specify INSERT to add a Predicate to a Rule. Use DELETE to remove a Predicate
- // from a Rule.
+ // The value returned by the most recent call to GetChangeToken.
 //
- // Action is a required field
- Action *string `type:"string" required:"true" enum:"ChangeAction"`
+ // ChangeToken is a required field
+ ChangeToken *string `min:"1" type:"string" required:"true"`

- // The ID of the Predicate (such as an IPSet) that you want to add to a Rule.
+ // An array of XssMatchSetUpdate objects that you want to insert into or delete
+ // from an XssMatchSet. For more information, see the applicable data types:
 //
- // Predicate is a required field
- Predicate *Predicate `type:"structure" required:"true"`
+ // * XssMatchSetUpdate: Contains Action and XssMatchTuple
+ //
+ // * XssMatchTuple: Contains FieldToMatch and TextTransformation
+ //
+ // * FieldToMatch: Contains Data and Type
+ //
+ // Updates is a required field
+ Updates []*XssMatchSetUpdate `min:"1" type:"list" required:"true"`
+
+ // The XssMatchSetId of the XssMatchSet that you want to update. XssMatchSetId
+ // is returned by CreateXssMatchSet and by ListXssMatchSets.
+ //
+ // XssMatchSetId is a required field
+ XssMatchSetId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
-func (s RuleUpdate) String() string {
+func (s UpdateXssMatchSetInput) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s RuleUpdate) GoString() string {
+func (s UpdateXssMatchSetInput) GoString() string {
 return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
-func (s *RuleUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RuleUpdate"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) +func (s *UpdateXssMatchSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateXssMatchSetInput"} + if s.ChangeToken == nil { + invalidParams.Add(request.NewErrParamRequired("ChangeToken")) } - if s.Predicate == nil { - invalidParams.Add(request.NewErrParamRequired("Predicate")) + if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) } - if s.Predicate != nil { - if err := s.Predicate.Validate(); err != nil { - invalidParams.AddNested("Predicate", err.(request.ErrInvalidParams)) + if s.Updates == nil { + invalidParams.Add(request.NewErrParamRequired("Updates")) + } + if s.Updates != nil && len(s.Updates) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Updates", 1)) + } + if s.XssMatchSetId == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) + } + if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) + } + if s.Updates != nil { + for i, v := range s.Updates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) + } } } @@ -10229,237 +15828,91 @@ func (s *RuleUpdate) Validate() error { return nil } -// SetAction sets the Action field's value. -func (s *RuleUpdate) SetAction(v string) *RuleUpdate { - s.Action = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateXssMatchSetInput) SetChangeToken(v string) *UpdateXssMatchSetInput { + s.ChangeToken = &v + return s +} + +// SetUpdates sets the Updates field's value. +func (s *UpdateXssMatchSetInput) SetUpdates(v []*XssMatchSetUpdate) *UpdateXssMatchSetInput { + s.Updates = v return s } -// SetPredicate sets the Predicate field's value. -func (s *RuleUpdate) SetPredicate(v *Predicate) *RuleUpdate { - s.Predicate = v +// SetXssMatchSetId sets the XssMatchSetId field's value. +func (s *UpdateXssMatchSetInput) SetXssMatchSetId(v string) *UpdateXssMatchSetInput { + s.XssMatchSetId = &v return s } -// The response from a GetSampledRequests request includes a SampledHTTPRequests -// complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests -// contains one SampledHTTPRequest object for each web request that is returned -// by GetSampledRequests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SampledHTTPRequest -type SampledHTTPRequest struct { +// The response to an UpdateXssMatchSets request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSetResponse +type UpdateXssMatchSetOutput struct { _ struct{} `type:"structure"` - // The action for the Rule that the request matched: ALLOW, BLOCK, or COUNT. - Action *string `type:"string"` - - // A complex type that contains detailed information about the request. - // - // Request is a required field - Request *HTTPRequest `type:"structure" required:"true"` - - // The time at which AWS WAF received the request from your AWS resource, in - // Unix time format (in seconds). - Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - - // A value that indicates how one result in the response relates proportionally - // to other results in the response. 
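
The Validate method above mirrors the struct tags: ChangeToken and XssMatchSetId must be non-nil and at least one character, Updates must hold at least one element, and each element is validated recursively. A short sketch of the failing and passing paths (the token and IDs are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	update := &waf.XssMatchSetUpdate{
		Action: aws.String(waf.ChangeActionInsert),
		XssMatchTuple: &waf.XssMatchTuple{
			FieldToMatch:       &waf.FieldToMatch{Type: aws.String(waf.MatchFieldTypeUri)},
			TextTransformation: aws.String(waf.TextTransformationHtmlEntityDecode),
		},
	}

	// ChangeToken and XssMatchSetId are missing, so Validate reports both.
	bad := &waf.UpdateXssMatchSetInput{Updates: []*waf.XssMatchSetUpdate{update}}
	fmt.Println(bad.Validate())

	good := (&waf.UpdateXssMatchSetInput{}).
		SetChangeToken("token-from-GetChangeToken"). // placeholder
		SetXssMatchSetId("example-xss-set-id").      // placeholder
		SetUpdates([]*waf.XssMatchSetUpdate{update})
	fmt.Println(good.Validate()) // <nil>
}
```
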
A result that has a weight of 2 represents - // roughly twice as many CloudFront web requests as a result that has a weight - // of 1. - // - // Weight is a required field - Weight *int64 `type:"long" required:"true"` + // The ChangeToken that you used to submit the UpdateXssMatchSet request. You + // can also use this value to query the status of the request. For more information, + // see GetChangeTokenStatus. + ChangeToken *string `min:"1" type:"string"` } // String returns the string representation -func (s SampledHTTPRequest) String() string { +func (s UpdateXssMatchSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SampledHTTPRequest) GoString() string { +func (s UpdateXssMatchSetOutput) GoString() string { return s.String() } -// SetAction sets the Action field's value. -func (s *SampledHTTPRequest) SetAction(v string) *SampledHTTPRequest { - s.Action = &v - return s -} - -// SetRequest sets the Request field's value. -func (s *SampledHTTPRequest) SetRequest(v *HTTPRequest) *SampledHTTPRequest { - s.Request = v - return s -} - -// SetTimestamp sets the Timestamp field's value. -func (s *SampledHTTPRequest) SetTimestamp(v time.Time) *SampledHTTPRequest { - s.Timestamp = &v - return s -} - -// SetWeight sets the Weight field's value. -func (s *SampledHTTPRequest) SetWeight(v int64) *SampledHTTPRequest { - s.Weight = &v +// SetChangeToken sets the ChangeToken field's value. +func (s *UpdateXssMatchSetOutput) SetChangeToken(v string) *UpdateXssMatchSetOutput { + s.ChangeToken = &v return s } -// Specifies a constraint on the size of a part of the web request. AWS WAF -// uses the Size, ComparisonOperator, and FieldToMatch to build an expression -// in the form of "SizeComparisonOperator size in bytes of FieldToMatch". If -// that expression is true, the SizeConstraint is considered to match. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraint -type SizeConstraint struct { +// For the action that is associated with a rule in a WebACL, specifies the +// action that you want AWS WAF to perform when a web request matches all of +// the conditions in a rule. For the default action in a WebACL, specifies the +// action that you want AWS WAF to take when a web request doesn't match all +// of the conditions in any of the rules in a WebACL. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WafAction +type WafAction struct { _ struct{} `type:"structure"` - // The type of comparison you want AWS WAF to perform. AWS WAF uses this in - // combination with the provided Size and FieldToMatch to build an expression - // in the form of "SizeComparisonOperator size in bytes of FieldToMatch". If - // that expression is true, the SizeConstraint is considered to match. 
- // - // EQ: Used to test if the Size is equal to the size of the FieldToMatch - // - // NE: Used to test if the Size is not equal to the size of the FieldToMatch - // - // LE: Used to test if the Size is less than or equal to the size of the FieldToMatch - // - // LT: Used to test if the Size is strictly less than the size of the FieldToMatch - // - // GE: Used to test if the Size is greater than or equal to the size of the - // FieldToMatch - // - // GT: Used to test if the Size is strictly greater than the size of the FieldToMatch - // - // ComparisonOperator is a required field - ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"` - - // Specifies where in a web request to look for the size constraint. - // - // FieldToMatch is a required field - FieldToMatch *FieldToMatch `type:"structure" required:"true"` - - // The size in bytes that you want AWS WAF to compare against the size of the - // specified FieldToMatch. AWS WAF uses this in combination with ComparisonOperator - // and FieldToMatch to build an expression in the form of "SizeComparisonOperator - // size in bytes of FieldToMatch". If that expression is true, the SizeConstraint - // is considered to match. - // - // Valid values for size are 0 - 21474836480 bytes (0 - 20 GB). - // - // If you specify URI for the value of Type, the / in the URI counts as one - // character. For example, the URI /logo.jpg is nine characters long. - // - // Size is a required field - Size *int64 `type:"long" required:"true"` - - // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, - // AWS WAF performs the transformation on FieldToMatch before inspecting a request - // for a match. - // - // Note that if you choose BODY for the value of Type, you must choose NONE - // for TextTransformation because CloudFront forwards only the first 8192 bytes - // for inspection. - // - // NONE - // - // Specify NONE if you don't want to perform any text transformations. - // - // CMD_LINE - // - // When you're concerned that attackers are injecting an operating system command - // line command and using unusual formatting to disguise some or all of the - // command, use this option to perform the following transformations: - // - // * Delete the following characters: \ " ' ^ - // - // * Delete spaces before the following characters: / ( - // - // * Replace the following characters with a space: , ; - // - // * Replace multiple spaces with one space - // - // * Convert uppercase letters (A-Z) to lowercase (a-z) - // - // COMPRESS_WHITE_SPACE - // - // Use this option to replace the following characters with a space character - // (decimal 32): - // - // * \f, formfeed, decimal 12 - // - // * \t, tab, decimal 9 - // - // * \n, newline, decimal 10 - // - // * \r, carriage return, decimal 13 - // - // * \v, vertical tab, decimal 11 - // - // * non-breaking space, decimal 160 - // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. - // - // HTML_ENTITY_DECODE - // - // Use this option to replace HTML-encoded characters with unencoded characters. 
- // HTML_ENTITY_DECODE performs the following operations: - // - // * Replaces (ampersand)quot; with " - // - // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 - // - // * Replaces (ampersand)lt; with a "less than" symbol - // - // * Replaces (ampersand)gt; with > - // - // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, - // with the corresponding characters - // - // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, - // with the corresponding characters - // - // LOWERCASE + // Specifies how you want AWS WAF to respond to requests that match the settings + // in a Rule. Valid settings include the following: // - // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). + // * ALLOW: AWS WAF allows requests // - // URL_DECODE + // * BLOCK: AWS WAF blocks requests // - // Use this option to decode a URL-encoded value. + // * COUNT: AWS WAF increments a counter of the requests that match all of + // the conditions in the rule. AWS WAF then continues to inspect the web + // request based on the remaining rules in the web ACL. You can't specify + // COUNT for the default action for a WebACL. // - // TextTransformation is a required field - TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` + // Type is a required field + Type *string `type:"string" required:"true" enum:"WafActionType"` } // String returns the string representation -func (s SizeConstraint) String() string { +func (s WafAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SizeConstraint) GoString() string { +func (s WafAction) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SizeConstraint) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SizeConstraint"} - if s.ComparisonOperator == nil { - invalidParams.Add(request.NewErrParamRequired("ComparisonOperator")) - } - if s.FieldToMatch == nil { - invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) - } - if s.Size == nil { - invalidParams.Add(request.NewErrParamRequired("Size")) - } - if s.TextTransformation == nil { - invalidParams.Add(request.NewErrParamRequired("TextTransformation")) - } - if s.FieldToMatch != nil { - if err := s.FieldToMatch.Validate(); err != nil { - invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) - } +func (s *WafAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WafAction"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -10467,175 +15920,180 @@ func (s *SizeConstraint) Validate() error { } return nil } - -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *SizeConstraint) SetComparisonOperator(v string) *SizeConstraint { - s.ComparisonOperator = &v - return s -} - -// SetFieldToMatch sets the FieldToMatch field's value. -func (s *SizeConstraint) SetFieldToMatch(v *FieldToMatch) *SizeConstraint { - s.FieldToMatch = v - return s -} - -// SetSize sets the Size field's value. -func (s *SizeConstraint) SetSize(v int64) *SizeConstraint { - s.Size = &v - return s -} - -// SetTextTransformation sets the TextTransformation field's value. -func (s *SizeConstraint) SetTextTransformation(v string) *SizeConstraint { - s.TextTransformation = &v + +// SetType sets the Type field's value. 
+func (s *WafAction) SetType(v string) *WafAction { + s.Type = &v return s } -// A complex type that contains SizeConstraint objects, which specify the parts -// of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet -// contains more than one SizeConstraint object, a request only needs to match -// one constraint to be considered a match. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraintSet -type SizeConstraintSet struct { +// Contains the Rules that identify the requests that you want to allow, block, +// or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), +// and the action for each Rule that you add to a WebACL, for example, block +// requests from specified IP addresses or block requests from specified referrers. +// You also associate the WebACL with a CloudFront distribution to identify +// the requests that you want AWS WAF to filter. If you add more than one Rule +// to a WebACL, a request needs to match only one of the specifications to be +// allowed, blocked, or counted. For more information, see UpdateWebACL. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WebACL +type WebACL struct { _ struct{} `type:"structure"` - // The name, if any, of the SizeConstraintSet. + // The action to perform if none of the Rules contained in the WebACL match. + // The action is specified by the WafAction object. + // + // DefaultAction is a required field + DefaultAction *WafAction `type:"structure" required:"true"` + + // A friendly name or description for the metrics for this WebACL. The name + // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't + // contain whitespace. You can't change MetricName after you create the WebACL. + MetricName *string `type:"string"` + + // A friendly name or description of the WebACL. You can't change the name of + // a WebACL after you create it. Name *string `min:"1" type:"string"` - // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId - // to get information about a SizeConstraintSet (see GetSizeConstraintSet), - // update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet - // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet - // from AWS WAF (see DeleteSizeConstraintSet). - // - // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + // An array that contains the action for each Rule in a WebACL, the priority + // of the Rule, and the ID of the Rule. // - // SizeConstraintSetId is a required field - SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + // Rules is a required field + Rules []*ActivatedRule `type:"list" required:"true"` - // Specifies the parts of web requests that you want to inspect the size of. + // A unique identifier for a WebACL. You use WebACLId to get information about + // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete + // a WebACL from AWS WAF (see DeleteWebACL). // - // SizeConstraints is a required field - SizeConstraints []*SizeConstraint `type:"list" required:"true"` + // WebACLId is returned by CreateWebACL and by ListWebACLs. 
+ // + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s SizeConstraintSet) String() string { +func (s WebACL) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SizeConstraintSet) GoString() string { +func (s WebACL) GoString() string { return s.String() } +// SetDefaultAction sets the DefaultAction field's value. +func (s *WebACL) SetDefaultAction(v *WafAction) *WebACL { + s.DefaultAction = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *WebACL) SetMetricName(v string) *WebACL { + s.MetricName = &v + return s +} + // SetName sets the Name field's value. -func (s *SizeConstraintSet) SetName(v string) *SizeConstraintSet { +func (s *WebACL) SetName(v string) *WebACL { s.Name = &v return s } -// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. -func (s *SizeConstraintSet) SetSizeConstraintSetId(v string) *SizeConstraintSet { - s.SizeConstraintSetId = &v +// SetRules sets the Rules field's value. +func (s *WebACL) SetRules(v []*ActivatedRule) *WebACL { + s.Rules = v return s } -// SetSizeConstraints sets the SizeConstraints field's value. -func (s *SizeConstraintSet) SetSizeConstraints(v []*SizeConstraint) *SizeConstraintSet { - s.SizeConstraints = v +// SetWebACLId sets the WebACLId field's value. +func (s *WebACL) SetWebACLId(v string) *WebACL { + s.WebACLId = &v return s } -// The Id and Name of a SizeConstraintSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraintSetSummary -type SizeConstraintSetSummary struct { +// Contains the identifier and the name or description of the WebACL. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WebACLSummary +type WebACLSummary struct { _ struct{} `type:"structure"` - // The name of the SizeConstraintSet, if any. + // A friendly name or description of the WebACL. You can't change the name of + // a WebACL after you create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // A unique identifier for a SizeConstraintSet. You use SizeConstraintSetId - // to get information about a SizeConstraintSet (see GetSizeConstraintSet), - // update a SizeConstraintSet (see UpdateSizeConstraintSet), insert a SizeConstraintSet - // into a Rule or delete one from a Rule (see UpdateRule), and delete a SizeConstraintSet - // from AWS WAF (see DeleteSizeConstraintSet). + // A unique identifier for a WebACL. You use WebACLId to get information about + // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete + // a WebACL from AWS WAF (see DeleteWebACL). // - // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. + // WebACLId is returned by CreateWebACL and by ListWebACLs. // - // SizeConstraintSetId is a required field - SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s SizeConstraintSetSummary) String() string { +func (s WebACLSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SizeConstraintSetSummary) GoString() string { +func (s WebACLSummary) GoString() string { return s.String() } // SetName sets the Name field's value. 
-func (s *SizeConstraintSetSummary) SetName(v string) *SizeConstraintSetSummary { +func (s *WebACLSummary) SetName(v string) *WebACLSummary { s.Name = &v return s } -// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. -func (s *SizeConstraintSetSummary) SetSizeConstraintSetId(v string) *SizeConstraintSetSummary { - s.SizeConstraintSetId = &v +// SetWebACLId sets the WebACLId field's value. +func (s *WebACLSummary) SetWebACLId(v string) *WebACLSummary { + s.WebACLId = &v return s } -// Specifies the part of a web request that you want to inspect the size of -// and indicates whether you want to add the specification to a SizeConstraintSet -// or delete it from a SizeConstraintSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SizeConstraintSetUpdate -type SizeConstraintSetUpdate struct { +// Specifies whether to insert a Rule into or delete a Rule from a WebACL. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WebACLUpdate +type WebACLUpdate struct { _ struct{} `type:"structure"` - // Specify INSERT to add a SizeConstraintSetUpdate to a SizeConstraintSet. Use - // DELETE to remove a SizeConstraintSetUpdate from a SizeConstraintSet. + // Specifies whether to insert a Rule into or delete a Rule from a WebACL. // // Action is a required field Action *string `type:"string" required:"true" enum:"ChangeAction"` - // Specifies a constraint on the size of a part of the web request. AWS WAF - // uses the Size, ComparisonOperator, and FieldToMatch to build an expression - // in the form of "SizeComparisonOperator size in bytes of FieldToMatch". If - // that expression is true, the SizeConstraint is considered to match. + // The ActivatedRule object in an UpdateWebACL request specifies a Rule that + // you want to insert or delete, the priority of the Rule in the WebACL, and + // the action that you want AWS WAF to take when a web request matches the Rule + // (ALLOW, BLOCK, or COUNT). // - // SizeConstraint is a required field - SizeConstraint *SizeConstraint `type:"structure" required:"true"` + // ActivatedRule is a required field + ActivatedRule *ActivatedRule `type:"structure" required:"true"` } // String returns the string representation -func (s SizeConstraintSetUpdate) String() string { +func (s WebACLUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SizeConstraintSetUpdate) GoString() string { +func (s WebACLUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *SizeConstraintSetUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "SizeConstraintSetUpdate"}
+func (s *WebACLUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebACLUpdate"}
 if s.Action == nil {
 invalidParams.Add(request.NewErrParamRequired("Action"))
 }
- if s.SizeConstraint == nil {
- invalidParams.Add(request.NewErrParamRequired("SizeConstraint"))
+ if s.ActivatedRule == nil {
+ invalidParams.Add(request.NewErrParamRequired("ActivatedRule"))
 }
- if s.SizeConstraint != nil {
- if err := s.SizeConstraint.Validate(); err != nil {
- invalidParams.AddNested("SizeConstraint", err.(request.ErrInvalidParams))
+ if s.ActivatedRule != nil {
+ if err := s.ActivatedRule.Validate(); err != nil {
+ invalidParams.AddNested("ActivatedRule", err.(request.ErrInvalidParams))
 }
 }

@@ -10646,163 +16104,161 @@ func (s *SizeConstraintSetUpdate) Validate() error {
 }

 // SetAction sets the Action field's value.
-func (s *SizeConstraintSetUpdate) SetAction(v string) *SizeConstraintSetUpdate {
+func (s *WebACLUpdate) SetAction(v string) *WebACLUpdate {
 s.Action = &v
 return s
}

-// SetSizeConstraint sets the SizeConstraint field's value.
-func (s *SizeConstraintSetUpdate) SetSizeConstraint(v *SizeConstraint) *SizeConstraintSetUpdate {
- s.SizeConstraint = v
+// SetActivatedRule sets the ActivatedRule field's value.
+func (s *WebACLUpdate) SetActivatedRule(v *ActivatedRule) *WebACLUpdate {
+ s.ActivatedRule = v
 return s
}

-// A complex type that contains SqlInjectionMatchTuple objects, which specify
-// the parts of web requests that you want AWS WAF to inspect for snippets of
-// malicious SQL code and, if you want AWS WAF to inspect a header, the name
-// of the header. If a SqlInjectionMatchSet contains more than one SqlInjectionMatchTuple
-// object, a request needs to include snippets of SQL code in only one of the
-// specified parts of the request to be considered a match.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchSet
-type SqlInjectionMatchSet struct {
+// A complex type that contains XssMatchTuple objects, which specify the parts
+// of web requests that you want AWS WAF to inspect for cross-site scripting
+// attacks and, if you want AWS WAF to inspect a header, the name of the header.
+// If an XssMatchSet contains more than one XssMatchTuple object, a request needs
+// to include cross-site scripting attacks in only one of the specified parts
+// of the request to be considered a match.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchSet
+type XssMatchSet struct {
 _ struct{} `type:"structure"`

- // The name, if any, of the SqlInjectionMatchSet.
+ // The name, if any, of the XssMatchSet.
 Name *string `min:"1" type:"string"`

- // A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId
- // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet),
- // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a
- // SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule),
- // and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet).
+ // A unique identifier for an XssMatchSet.
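
A WebACLUpdate, defined a little above, pairs a ChangeAction with the ActivatedRule to insert or delete; UpdateWebACLInput then bundles those updates with the ACL's default action. A sketch of inserting one rule at priority 1; the client setup and both IDs are placeholder assumptions, and note that COUNT is not a legal default action:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.UpdateWebACL(&waf.UpdateWebACLInput{
		ChangeToken: token.ChangeToken,
		WebACLId:    aws.String("example-web-acl-id"), // placeholder
		// The default action may be ALLOW or BLOCK, never COUNT.
		DefaultAction: &waf.WafAction{Type: aws.String(waf.WafActionTypeAllow)},
		Updates: []*waf.WebACLUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			ActivatedRule: &waf.ActivatedRule{
				Priority: aws.Int64(1),
				RuleId:   aws.String("example-rule-id"), // placeholder
				Action:   &waf.WafAction{Type: aws.String(waf.WafActionTypeBlock)},
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
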
You use XssMatchSetId to get information + // about an XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), + // insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), + // and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet). // - // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. + // XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets. // - // SqlInjectionMatchSetId is a required field - SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + // XssMatchSetId is a required field + XssMatchSetId *string `min:"1" type:"string" required:"true"` - // Specifies the parts of web requests that you want to inspect for snippets - // of malicious SQL code. + // Specifies the parts of web requests that you want to inspect for cross-site + // scripting attacks. // - // SqlInjectionMatchTuples is a required field - SqlInjectionMatchTuples []*SqlInjectionMatchTuple `type:"list" required:"true"` + // XssMatchTuples is a required field + XssMatchTuples []*XssMatchTuple `type:"list" required:"true"` } // String returns the string representation -func (s SqlInjectionMatchSet) String() string { +func (s XssMatchSet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SqlInjectionMatchSet) GoString() string { +func (s XssMatchSet) GoString() string { return s.String() } // SetName sets the Name field's value. -func (s *SqlInjectionMatchSet) SetName(v string) *SqlInjectionMatchSet { +func (s *XssMatchSet) SetName(v string) *XssMatchSet { s.Name = &v return s } -// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. -func (s *SqlInjectionMatchSet) SetSqlInjectionMatchSetId(v string) *SqlInjectionMatchSet { - s.SqlInjectionMatchSetId = &v +// SetXssMatchSetId sets the XssMatchSetId field's value. +func (s *XssMatchSet) SetXssMatchSetId(v string) *XssMatchSet { + s.XssMatchSetId = &v return s } -// SetSqlInjectionMatchTuples sets the SqlInjectionMatchTuples field's value. -func (s *SqlInjectionMatchSet) SetSqlInjectionMatchTuples(v []*SqlInjectionMatchTuple) *SqlInjectionMatchSet { - s.SqlInjectionMatchTuples = v +// SetXssMatchTuples sets the XssMatchTuples field's value. +func (s *XssMatchSet) SetXssMatchTuples(v []*XssMatchTuple) *XssMatchSet { + s.XssMatchTuples = v return s } -// The Id and Name of a SqlInjectionMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchSetSummary -type SqlInjectionMatchSetSummary struct { +// The Id and Name of an XssMatchSet. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchSetSummary +type XssMatchSetSummary struct { _ struct{} `type:"structure"` - // The name of the SqlInjectionMatchSet, if any, specified by Id. + // The name of the XssMatchSet, if any, specified by Id. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // A unique identifier for a SqlInjectionMatchSet. You use SqlInjectionMatchSetId - // to get information about a SqlInjectionMatchSet (see GetSqlInjectionMatchSet), - // update a SqlInjectionMatchSet (see UpdateSqlInjectionMatchSet), insert a - // SqlInjectionMatchSet into a Rule or delete one from a Rule (see UpdateRule), - // and delete a SqlInjectionMatchSet from AWS WAF (see DeleteSqlInjectionMatchSet). + // A unique identifier for an XssMatchSet. 
You use XssMatchSetId to get information
+ // about an XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet),
+ // insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule),
+ // and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet).
 //
- // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets.
+ // XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets.
 //
- // SqlInjectionMatchSetId is a required field
- SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"`
+ // XssMatchSetId is a required field
+ XssMatchSetId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
-func (s SqlInjectionMatchSetSummary) String() string {
+func (s XssMatchSetSummary) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s SqlInjectionMatchSetSummary) GoString() string {
+func (s XssMatchSetSummary) GoString() string {
 return s.String()
}

// SetName sets the Name field's value.
-func (s *SqlInjectionMatchSetSummary) SetName(v string) *SqlInjectionMatchSetSummary {
+func (s *XssMatchSetSummary) SetName(v string) *XssMatchSetSummary {
 s.Name = &v
 return s
}

-// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value.
-func (s *SqlInjectionMatchSetSummary) SetSqlInjectionMatchSetId(v string) *SqlInjectionMatchSetSummary {
- s.SqlInjectionMatchSetId = &v
+// SetXssMatchSetId sets the XssMatchSetId field's value.
+func (s *XssMatchSetSummary) SetXssMatchSetId(v string) *XssMatchSetSummary {
+ s.XssMatchSetId = &v
 return s
}

-// Specifies the part of a web request that you want to inspect for snippets
-// of malicious SQL code and indicates whether you want to add the specification
-// to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchSetUpdate
-type SqlInjectionMatchSetUpdate struct {
+// Specifies the part of a web request that you want to inspect for cross-site
+// scripting attacks and indicates whether you want to add the specification
+// to an XssMatchSet or delete it from an XssMatchSet.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchSetUpdate
+type XssMatchSetUpdate struct {
 _ struct{} `type:"structure"`

- // Specify INSERT to add a SqlInjectionMatchSetUpdate to a SqlInjectionMatchSet.
- // Use DELETE to remove a SqlInjectionMatchSetUpdate from a SqlInjectionMatchSet.
+ // Specify INSERT to add an XssMatchSetUpdate to an XssMatchSet. Use DELETE to
+ // remove an XssMatchSetUpdate from an XssMatchSet.
 //
 // Action is a required field
 Action *string `type:"string" required:"true" enum:"ChangeAction"`

 // Specifies the part of a web request that you want AWS WAF to inspect for
- // snippets of malicious SQL code and, if you want AWS WAF to inspect a header,
+ // cross-site scripting attacks and, if you want AWS WAF to inspect a header,
 // the name of the header.
// - // SqlInjectionMatchTuple is a required field - SqlInjectionMatchTuple *SqlInjectionMatchTuple `type:"structure" required:"true"` + // XssMatchTuple is a required field + XssMatchTuple *XssMatchTuple `type:"structure" required:"true"` } // String returns the string representation -func (s SqlInjectionMatchSetUpdate) String() string { +func (s XssMatchSetUpdate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SqlInjectionMatchSetUpdate) GoString() string { +func (s XssMatchSetUpdate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SqlInjectionMatchSetUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SqlInjectionMatchSetUpdate"} +func (s *XssMatchSetUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "XssMatchSetUpdate"} if s.Action == nil { invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.SqlInjectionMatchTuple == nil { - invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchTuple")) + if s.XssMatchTuple == nil { + invalidParams.Add(request.NewErrParamRequired("XssMatchTuple")) } - if s.SqlInjectionMatchTuple != nil { - if err := s.SqlInjectionMatchTuple.Validate(); err != nil { - invalidParams.AddNested("SqlInjectionMatchTuple", err.(request.ErrInvalidParams)) + if s.XssMatchTuple != nil { + if err := s.XssMatchTuple.Validate(); err != nil { + invalidParams.AddNested("XssMatchTuple", err.(request.ErrInvalidParams)) } } @@ -10813,25 +16269,25 @@ func (s *SqlInjectionMatchSetUpdate) Validate() error { } // SetAction sets the Action field's value. -func (s *SqlInjectionMatchSetUpdate) SetAction(v string) *SqlInjectionMatchSetUpdate { +func (s *XssMatchSetUpdate) SetAction(v string) *XssMatchSetUpdate { s.Action = &v return s } -// SetSqlInjectionMatchTuple sets the SqlInjectionMatchTuple field's value. -func (s *SqlInjectionMatchSetUpdate) SetSqlInjectionMatchTuple(v *SqlInjectionMatchTuple) *SqlInjectionMatchSetUpdate { - s.SqlInjectionMatchTuple = v +// SetXssMatchTuple sets the XssMatchTuple field's value. +func (s *XssMatchSetUpdate) SetXssMatchTuple(v *XssMatchTuple) *XssMatchSetUpdate { + s.XssMatchTuple = v return s } // Specifies the part of a web request that you want AWS WAF to inspect for -// snippets of malicious SQL code and, if you want AWS WAF to inspect a header, +// cross-site scripting attacks and, if you want AWS WAF to inspect a header, // the name of the header. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/SqlInjectionMatchTuple -type SqlInjectionMatchTuple struct { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchTuple +type XssMatchTuple struct { _ struct{} `type:"structure"` - // Specifies where in a web request to look for snippets of malicious SQL code. + // Specifies where in a web request to look for cross-site scripting attacks. 
// // FieldToMatch is a required field FieldToMatch *FieldToMatch `type:"structure" required:"true"` @@ -10912,18 +16368,18 @@ type SqlInjectionMatchTuple struct { } // String returns the string representation -func (s SqlInjectionMatchTuple) String() string { +func (s XssMatchTuple) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SqlInjectionMatchTuple) GoString() string { +func (s XssMatchTuple) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SqlInjectionMatchTuple) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SqlInjectionMatchTuple"} +func (s *XssMatchTuple) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "XssMatchTuple"} if s.FieldToMatch == nil { invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) } @@ -10943,1630 +16399,808 @@ func (s *SqlInjectionMatchTuple) Validate() error { } // SetFieldToMatch sets the FieldToMatch field's value. -func (s *SqlInjectionMatchTuple) SetFieldToMatch(v *FieldToMatch) *SqlInjectionMatchTuple { +func (s *XssMatchTuple) SetFieldToMatch(v *FieldToMatch) *XssMatchTuple { s.FieldToMatch = v return s } // SetTextTransformation sets the TextTransformation field's value. -func (s *SqlInjectionMatchTuple) SetTextTransformation(v string) *SqlInjectionMatchTuple { +func (s *XssMatchTuple) SetTextTransformation(v string) *XssMatchTuple { s.TextTransformation = &v return s } -// In a GetSampledRequests request, the StartTime and EndTime objects specify -// the time range for which you want AWS WAF to return a sample of web requests. -// -// In a GetSampledRequests response, the StartTime and EndTime objects specify -// the time range for which AWS WAF actually returned a sample of web requests. -// AWS WAF gets the specified number of requests from among the first 5,000 -// requests that your AWS resource receives during the specified time period. -// If your resource receives more than 5,000 requests during that period, AWS -// WAF stops sampling after the 5,000th request. In that case, EndTime is the -// time that AWS WAF received the 5,000th request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/TimeWindow -type TimeWindow struct { - _ struct{} `type:"structure"` +const ( + // ChangeActionInsert is a ChangeAction enum value + ChangeActionInsert = "INSERT" - // The end of the time range from which you want GetSampledRequests to return - // a sample of the requests that your AWS resource received. Specify the date - // and time in the following format: "2016-09-27T14:50Z". You can specify any - // time range in the previous three hours. - // - // EndTime is a required field - EndTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + // ChangeActionDelete is a ChangeAction enum value + ChangeActionDelete = "DELETE" +) - // The beginning of the time range from which you want GetSampledRequests to - // return a sample of the requests that your AWS resource received. Specify - // the date and time in the following format: "2016-09-27T14:50Z". You can specify - // any time range in the previous three hours. 
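
The TimeWindow comments above (removed in this hunk) describe GetSampledRequests semantics: the window must fall within the previous three hours, and sampling stops after the first 5,000 requests. A sketch of sampling the last hour for one rule, with placeholder IDs:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	now := time.Now()
	out, err := svc.GetSampledRequests(&waf.GetSampledRequestsInput{
		WebAclId: aws.String("example-web-acl-id"), // placeholder
		RuleId:   aws.String("example-rule-id"),    // placeholder
		MaxItems: aws.Int64(100),
		TimeWindow: &waf.TimeWindow{
			StartTime: aws.Time(now.Add(-time.Hour)),
			EndTime:   aws.Time(now),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.SampledRequests {
		// Weight is proportional: weight 2 stands for roughly twice as many
		// requests as weight 1.
		fmt.Println(aws.StringValue(r.Action), aws.Int64Value(r.Weight))
	}
}
```
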
- // - // StartTime is a required field - StartTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` -} +const ( + // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value + ChangeTokenStatusProvisioned = "PROVISIONED" -// String returns the string representation -func (s TimeWindow) String() string { - return awsutil.Prettify(s) -} + // ChangeTokenStatusPending is a ChangeTokenStatus enum value + ChangeTokenStatusPending = "PENDING" -// GoString returns the string representation -func (s TimeWindow) GoString() string { - return s.String() -} + // ChangeTokenStatusInsync is a ChangeTokenStatus enum value + ChangeTokenStatusInsync = "INSYNC" +) -// Validate inspects the fields of the type to determine if they are valid. -func (s *TimeWindow) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TimeWindow"} - if s.EndTime == nil { - invalidParams.Add(request.NewErrParamRequired("EndTime")) - } - if s.StartTime == nil { - invalidParams.Add(request.NewErrParamRequired("StartTime")) - } +const ( + // ComparisonOperatorEq is a ComparisonOperator enum value + ComparisonOperatorEq = "EQ" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // ComparisonOperatorNe is a ComparisonOperator enum value + ComparisonOperatorNe = "NE" -// SetEndTime sets the EndTime field's value. -func (s *TimeWindow) SetEndTime(v time.Time) *TimeWindow { - s.EndTime = &v - return s -} + // ComparisonOperatorLe is a ComparisonOperator enum value + ComparisonOperatorLe = "LE" -// SetStartTime sets the StartTime field's value. -func (s *TimeWindow) SetStartTime(v time.Time) *TimeWindow { - s.StartTime = &v - return s -} + // ComparisonOperatorLt is a ComparisonOperator enum value + ComparisonOperatorLt = "LT" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSetRequest -type UpdateByteMatchSetInput struct { - _ struct{} `type:"structure"` + // ComparisonOperatorGe is a ComparisonOperator enum value + ComparisonOperatorGe = "GE" - // The ByteMatchSetId of the ByteMatchSet that you want to update. ByteMatchSetId - // is returned by CreateByteMatchSet and by ListByteMatchSets. - // - // ByteMatchSetId is a required field - ByteMatchSetId *string `min:"1" type:"string" required:"true"` + // ComparisonOperatorGt is a ComparisonOperator enum value + ComparisonOperatorGt = "GT" +) - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` +const ( + // GeoMatchConstraintTypeCountry is a GeoMatchConstraintType enum value + GeoMatchConstraintTypeCountry = "Country" +) - // An array of ByteMatchSetUpdate objects that you want to insert into or delete - // from a ByteMatchSet. 
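
The ChangeTokenStatus constants added above (PROVISIONED, PENDING, INSYNC) describe the lifecycle of a submitted change, which callers typically poll through GetChangeTokenStatus. A hypothetical polling helper; the attempt count and sleep interval are arbitrary choices:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

// waitInSync polls until the change identified by token has fully propagated.
func waitInSync(svc *waf.WAF, token *string) error {
	for i := 0; i < 30; i++ {
		out, err := svc.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
			ChangeToken: token,
		})
		if err != nil {
			return err
		}
		if aws.StringValue(out.ChangeTokenStatus) == waf.ChangeTokenStatusInsync {
			return nil
		}
		time.Sleep(5 * time.Second) // arbitrary backoff
	}
	return fmt.Errorf("change %s did not reach INSYNC", aws.StringValue(token))
}

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}
	// ... submit an Update* call with token.ChangeToken here ...
	if err := waitInSync(svc, token.ChangeToken); err != nil {
		log.Fatal(err)
	}
}
```
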
For more information, see the applicable data types: - // - // * ByteMatchSetUpdate: Contains Action and ByteMatchTuple - // - // * ByteMatchTuple: Contains FieldToMatch, PositionalConstraint, TargetString, - // and TextTransformation - // - // * FieldToMatch: Contains Data and Type - // - // Updates is a required field - Updates []*ByteMatchSetUpdate `type:"list" required:"true"` -} +const ( + // GeoMatchConstraintValueAf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAf = "AF" -// String returns the string representation -func (s UpdateByteMatchSetInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueAx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAx = "AX" -// GoString returns the string representation -func (s UpdateByteMatchSetInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueAl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAl = "AL" -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateByteMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateByteMatchSetInput"} - if s.ByteMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("ByteMatchSetId")) - } - if s.ByteMatchSetId != nil && len(*s.ByteMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ByteMatchSetId", 1)) - } - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueDz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDz = "DZ" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueAs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAs = "AS" -// SetByteMatchSetId sets the ByteMatchSetId field's value. 
-func (s *UpdateByteMatchSetInput) SetByteMatchSetId(v string) *UpdateByteMatchSetInput { - s.ByteMatchSetId = &v - return s -} + // GeoMatchConstraintValueAd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAd = "AD" + + // GeoMatchConstraintValueAo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAo = "AO" + + // GeoMatchConstraintValueAi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAi = "AI" + + // GeoMatchConstraintValueAq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAq = "AQ" + + // GeoMatchConstraintValueAg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAg = "AG" + + // GeoMatchConstraintValueAr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAr = "AR" + + // GeoMatchConstraintValueAm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAm = "AM" + + // GeoMatchConstraintValueAw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAw = "AW" + + // GeoMatchConstraintValueAu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAu = "AU" + + // GeoMatchConstraintValueAt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAt = "AT" + + // GeoMatchConstraintValueAz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAz = "AZ" + + // GeoMatchConstraintValueBs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBs = "BS" + + // GeoMatchConstraintValueBh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBh = "BH" + + // GeoMatchConstraintValueBd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBd = "BD" + + // GeoMatchConstraintValueBb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBb = "BB" + + // GeoMatchConstraintValueBy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBy = "BY" + + // GeoMatchConstraintValueBe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBe = "BE" + + // GeoMatchConstraintValueBz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBz = "BZ" + + // GeoMatchConstraintValueBj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBj = "BJ" + + // GeoMatchConstraintValueBm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBm = "BM" + + // GeoMatchConstraintValueBt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBt = "BT" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateByteMatchSetInput) SetChangeToken(v string) *UpdateByteMatchSetInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueBo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBo = "BO" -// SetUpdates sets the Updates field's value. -func (s *UpdateByteMatchSetInput) SetUpdates(v []*ByteMatchSetUpdate) *UpdateByteMatchSetInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueBq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBq = "BQ" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateByteMatchSetResponse -type UpdateByteMatchSetOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueBa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBa = "BA" - // The ChangeToken that you used to submit the UpdateByteMatchSet request. You - // can also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. 
- ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueBw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBw = "BW" -// String returns the string representation -func (s UpdateByteMatchSetOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueBv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBv = "BV" -// GoString returns the string representation -func (s UpdateByteMatchSetOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueBr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBr = "BR" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateByteMatchSetOutput) SetChangeToken(v string) *UpdateByteMatchSetOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueIo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIo = "IO" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSetRequest -type UpdateIPSetInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueBn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBn = "BN" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueBg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBg = "BG" - // The IPSetId of the IPSet that you want to update. IPSetId is returned by - // CreateIPSet and by ListIPSets. - // - // IPSetId is a required field - IPSetId *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueBf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBf = "BF" - // An array of IPSetUpdate objects that you want to insert into or delete from - // an IPSet. For more information, see the applicable data types: - // - // * IPSetUpdate: Contains Action and IPSetDescriptor - // - // * IPSetDescriptor: Contains Type and Value - // - // Updates is a required field - Updates []*IPSetUpdate `type:"list" required:"true"` -} + // GeoMatchConstraintValueBi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBi = "BI" -// String returns the string representation -func (s UpdateIPSetInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueKh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKh = "KH" -// GoString returns the string representation -func (s UpdateIPSetInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueCm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCm = "CM" -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateIPSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateIPSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.IPSetId == nil { - invalidParams.Add(request.NewErrParamRequired("IPSetId")) - } - if s.IPSetId != nil && len(*s.IPSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IPSetId", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueCa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCa = "CA" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueCv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCv = "CV" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateIPSetInput) SetChangeToken(v string) *UpdateIPSetInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueKy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKy = "KY" -// SetIPSetId sets the IPSetId field's value. -func (s *UpdateIPSetInput) SetIPSetId(v string) *UpdateIPSetInput { - s.IPSetId = &v - return s -} + // GeoMatchConstraintValueCf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCf = "CF" -// SetUpdates sets the Updates field's value. -func (s *UpdateIPSetInput) SetUpdates(v []*IPSetUpdate) *UpdateIPSetInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueTd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTd = "TD" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateIPSetResponse -type UpdateIPSetOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueCl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCl = "CL" - // The ChangeToken that you used to submit the UpdateIPSet request. You can - // also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueCn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCn = "CN" -// String returns the string representation -func (s UpdateIPSetOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueCx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCx = "CX" -// GoString returns the string representation -func (s UpdateIPSetOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueCc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCc = "CC" -// SetChangeToken sets the ChangeToken field's value. 
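The UpdateIPSetInput validation above encodes the calling convention shared by all of these generated WAF Update* operations: obtain a fresh ChangeToken, then submit a batch of insert/delete updates. A minimal sketch of that flow against this vendored waf package — the IPSet ID is a placeholder, and credentials/region come from the default session:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	// Every mutating WAF call requires a fresh change token.
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Insert one IPv4 CIDR range into an existing IPSet.
	_, err = svc.UpdateIPSet(&waf.UpdateIPSetInput{
		ChangeToken: tok.ChangeToken,
		IPSetId:     aws.String("example-ipset-id"), // placeholder
		Updates: []*waf.IPSetUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			IPSetDescriptor: &waf.IPSetDescriptor{
				Type:  aws.String(waf.IPSetDescriptorTypeIpv4),
				Value: aws.String("192.0.2.0/24"),
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}

The same token can then be polled with GetChangeTokenStatus, as the doc comments above note.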
-func (s *UpdateIPSetOutput) SetChangeToken(v string) *UpdateIPSetOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueCo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCo = "CO" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRuleRequest -type UpdateRateBasedRuleInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueKm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKm = "KM" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueCg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCg = "CG" - // The maximum number of requests, which have an identical value in the field - // specified by the RateKey, allowed in a five-minute period. If the number - // of requests exceeds the RateLimit and the other predicates specified in the - // rule are also met, AWS WAF triggers the action that is specified for this - // rule. - // - // RateLimit is a required field - RateLimit *int64 `min:"2000" type:"long" required:"true"` + // GeoMatchConstraintValueCd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCd = "CD" - // The RuleId of the RateBasedRule that you want to update. RuleId is returned - // by CreateRateBasedRule and by ListRateBasedRules. - // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueCk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCk = "CK" - // An array of RuleUpdate objects that you want to insert into or delete from - // a RateBasedRule. - // - // Updates is a required field - Updates []*RuleUpdate `type:"list" required:"true"` -} + // GeoMatchConstraintValueCr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCr = "CR" -// String returns the string representation -func (s UpdateRateBasedRuleInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueCi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCi = "CI" -// GoString returns the string representation -func (s UpdateRateBasedRuleInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueHr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHr = "HR" -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateRateBasedRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateRateBasedRuleInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.RateLimit == nil { - invalidParams.Add(request.NewErrParamRequired("RateLimit")) - } - if s.RateLimit != nil && *s.RateLimit < 2000 { - invalidParams.Add(request.NewErrParamMinValue("RateLimit", 2000)) - } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueCu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCu = "CU" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueCw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCw = "CW" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateRateBasedRuleInput) SetChangeToken(v string) *UpdateRateBasedRuleInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueCy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCy = "CY" -// SetRateLimit sets the RateLimit field's value. -func (s *UpdateRateBasedRuleInput) SetRateLimit(v int64) *UpdateRateBasedRuleInput { - s.RateLimit = &v - return s -} + // GeoMatchConstraintValueCz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCz = "CZ" -// SetRuleId sets the RuleId field's value. -func (s *UpdateRateBasedRuleInput) SetRuleId(v string) *UpdateRateBasedRuleInput { - s.RuleId = &v - return s -} + // GeoMatchConstraintValueDk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDk = "DK" -// SetUpdates sets the Updates field's value. -func (s *UpdateRateBasedRuleInput) SetUpdates(v []*RuleUpdate) *UpdateRateBasedRuleInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueDj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDj = "DJ" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRateBasedRuleResponse -type UpdateRateBasedRuleOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueDm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDm = "DM" - // The ChangeToken that you used to submit the UpdateRateBasedRule request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. 
- ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueDo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDo = "DO" -// String returns the string representation -func (s UpdateRateBasedRuleOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueEc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEc = "EC" -// GoString returns the string representation -func (s UpdateRateBasedRuleOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueEg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEg = "EG" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateRateBasedRuleOutput) SetChangeToken(v string) *UpdateRateBasedRuleOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueSv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSv = "SV" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRuleRequest -type UpdateRuleInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueGq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGq = "GQ" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueEr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEr = "ER" - // The RuleId of the Rule that you want to update. RuleId is returned by CreateRule - // and by ListRules. - // - // RuleId is a required field - RuleId *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueEe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEe = "EE" + + // GeoMatchConstraintValueEt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEt = "ET" - // An array of RuleUpdate objects that you want to insert into or delete from - // a Rule. For more information, see the applicable data types: - // - // * RuleUpdate: Contains Action and Predicate - // - // * Predicate: Contains DataId, Negated, and Type - // - // * FieldToMatch: Contains Data and Type - // - // Updates is a required field - Updates []*RuleUpdate `type:"list" required:"true"` -} + // GeoMatchConstraintValueFk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFk = "FK" -// String returns the string representation -func (s UpdateRuleInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueFo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFo = "FO" -// GoString returns the string representation -func (s UpdateRuleInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueFj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFj = "FJ" -// Validate inspects the fields of the type to determine if they are valid. 
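The RateLimit floor of 2000 enforced by UpdateRateBasedRuleInput's Validate method above is easy to miss. A hedged sketch of a rate-based rule update, reusing the imports and client from the IPSet example (rule and IPSet IDs are placeholders):

func updateRateRule(svc *waf.WAF, token *string) error {
	_, err := svc.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
		ChangeToken: token,
		RuleId:      aws.String("example-rate-rule-id"), // placeholder
		// 2000 requests per five minutes is the minimum the
		// validation above accepts.
		RateLimit: aws.Int64(2000),
		Updates: []*waf.RuleUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			Predicate: &waf.Predicate{
				DataId:  aws.String("example-ipset-id"), // placeholder
				Negated: aws.Bool(false),
				Type:    aws.String(waf.PredicateTypeIpmatch),
			},
		}},
	})
	return err
}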
-func (s *UpdateRuleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateRuleInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.RuleId == nil { - invalidParams.Add(request.NewErrParamRequired("RuleId")) - } - if s.RuleId != nil && len(*s.RuleId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RuleId", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueFi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFi = "FI" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueFr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFr = "FR" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateRuleInput) SetChangeToken(v string) *UpdateRuleInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueGf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGf = "GF" -// SetRuleId sets the RuleId field's value. -func (s *UpdateRuleInput) SetRuleId(v string) *UpdateRuleInput { - s.RuleId = &v - return s -} + // GeoMatchConstraintValuePf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePf = "PF" -// SetUpdates sets the Updates field's value. -func (s *UpdateRuleInput) SetUpdates(v []*RuleUpdate) *UpdateRuleInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueTf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTf = "TF" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateRuleResponse -type UpdateRuleOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueGa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGa = "GA" - // The ChangeToken that you used to submit the UpdateRule request. You can also - // use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueGm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGm = "GM" -// String returns the string representation -func (s UpdateRuleOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueGe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGe = "GE" -// GoString returns the string representation -func (s UpdateRuleOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueDe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDe = "DE" -// SetChangeToken sets the ChangeToken field's value. 
-func (s *UpdateRuleOutput) SetChangeToken(v string) *UpdateRuleOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueGh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGh = "GH" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSetRequest -type UpdateSizeConstraintSetInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueGi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGi = "GI" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueGr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGr = "GR" - // The SizeConstraintSetId of the SizeConstraintSet that you want to update. - // SizeConstraintSetId is returned by CreateSizeConstraintSet and by ListSizeConstraintSets. - // - // SizeConstraintSetId is a required field - SizeConstraintSetId *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueGl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGl = "GL" - // An array of SizeConstraintSetUpdate objects that you want to insert into - // or delete from a SizeConstraintSet. For more information, see the applicable - // data types: - // - // * SizeConstraintSetUpdate: Contains Action and SizeConstraint - // - // * SizeConstraint: Contains FieldToMatch, TextTransformation, ComparisonOperator, - // and Size - // - // * FieldToMatch: Contains Data and Type - // - // Updates is a required field - Updates []*SizeConstraintSetUpdate `type:"list" required:"true"` -} + // GeoMatchConstraintValueGd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGd = "GD" -// String returns the string representation -func (s UpdateSizeConstraintSetInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueGp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGp = "GP" -// GoString returns the string representation -func (s UpdateSizeConstraintSetInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueGu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGu = "GU" -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateSizeConstraintSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateSizeConstraintSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.SizeConstraintSetId == nil { - invalidParams.Add(request.NewErrParamRequired("SizeConstraintSetId")) - } - if s.SizeConstraintSetId != nil && len(*s.SizeConstraintSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SizeConstraintSetId", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueGt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGt = "GT" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueGg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGg = "GG" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateSizeConstraintSetInput) SetChangeToken(v string) *UpdateSizeConstraintSetInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueGn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGn = "GN" -// SetSizeConstraintSetId sets the SizeConstraintSetId field's value. -func (s *UpdateSizeConstraintSetInput) SetSizeConstraintSetId(v string) *UpdateSizeConstraintSetInput { - s.SizeConstraintSetId = &v - return s -} + // GeoMatchConstraintValueGw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGw = "GW" -// SetUpdates sets the Updates field's value. -func (s *UpdateSizeConstraintSetInput) SetUpdates(v []*SizeConstraintSetUpdate) *UpdateSizeConstraintSetInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueGy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGy = "GY" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSizeConstraintSetResponse -type UpdateSizeConstraintSetOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueHt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHt = "HT" - // The ChangeToken that you used to submit the UpdateSizeConstraintSet request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueHm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHm = "HM" -// String returns the string representation -func (s UpdateSizeConstraintSetOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueVa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVa = "VA" -// GoString returns the string representation -func (s UpdateSizeConstraintSetOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueHn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHn = "HN" -// SetChangeToken sets the ChangeToken field's value. 
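SizeConstraintSet updates carry the most fields of these batch types: a comparison operator, a size, a text transformation, and a field to match. A sketch under the same assumptions as the examples above (the set ID is a placeholder):

func addSizeConstraint(svc *waf.WAF, token *string) error {
	_, err := svc.UpdateSizeConstraintSet(&waf.UpdateSizeConstraintSetInput{
		ChangeToken:         token,
		SizeConstraintSetId: aws.String("example-size-set-id"), // placeholder
		Updates: []*waf.SizeConstraintSetUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			// Match request bodies larger than 8 KB, with no
			// text transformation applied first.
			SizeConstraint: &waf.SizeConstraint{
				ComparisonOperator: aws.String(waf.ComparisonOperatorGt),
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String(waf.MatchFieldTypeBody)},
				Size:               aws.Int64(8192),
				TextTransformation: aws.String(waf.TextTransformationNone),
			},
		}},
	})
	return err
}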
-func (s *UpdateSizeConstraintSetOutput) SetChangeToken(v string) *UpdateSizeConstraintSetOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueHk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHk = "HK" -// A request to update a SqlInjectionMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSetRequest -type UpdateSqlInjectionMatchSetInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueHu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHu = "HU" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueIs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIs = "IS" - // The SqlInjectionMatchSetId of the SqlInjectionMatchSet that you want to update. - // SqlInjectionMatchSetId is returned by CreateSqlInjectionMatchSet and by ListSqlInjectionMatchSets. - // - // SqlInjectionMatchSetId is a required field - SqlInjectionMatchSetId *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueIn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIn = "IN" - // An array of SqlInjectionMatchSetUpdate objects that you want to insert into - // or delete from a SqlInjectionMatchSet. For more information, see the applicable - // data types: - // - // * SqlInjectionMatchSetUpdate: Contains Action and SqlInjectionMatchTuple - // - // * SqlInjectionMatchTuple: Contains FieldToMatch and TextTransformation - // - // * FieldToMatch: Contains Data and Type - // - // Updates is a required field - Updates []*SqlInjectionMatchSetUpdate `type:"list" required:"true"` -} + // GeoMatchConstraintValueId is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueId = "ID" -// String returns the string representation -func (s UpdateSqlInjectionMatchSetInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueIr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIr = "IR" -// GoString returns the string representation -func (s UpdateSqlInjectionMatchSetInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueIq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIq = "IQ" -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateSqlInjectionMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateSqlInjectionMatchSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.SqlInjectionMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("SqlInjectionMatchSetId")) - } - if s.SqlInjectionMatchSetId != nil && len(*s.SqlInjectionMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SqlInjectionMatchSetId", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueIe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIe = "IE" + + // GeoMatchConstraintValueIm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIm = "IM" + + // GeoMatchConstraintValueIl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIl = "IL" + + // GeoMatchConstraintValueIt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIt = "IT" + + // GeoMatchConstraintValueJm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJm = "JM" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueJp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJp = "JP" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateSqlInjectionMatchSetInput) SetChangeToken(v string) *UpdateSqlInjectionMatchSetInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueJe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJe = "JE" -// SetSqlInjectionMatchSetId sets the SqlInjectionMatchSetId field's value. -func (s *UpdateSqlInjectionMatchSetInput) SetSqlInjectionMatchSetId(v string) *UpdateSqlInjectionMatchSetInput { - s.SqlInjectionMatchSetId = &v - return s -} + // GeoMatchConstraintValueJo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJo = "JO" -// SetUpdates sets the Updates field's value. -func (s *UpdateSqlInjectionMatchSetInput) SetUpdates(v []*SqlInjectionMatchSetUpdate) *UpdateSqlInjectionMatchSetInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueKz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKz = "KZ" -// The response to an UpdateSqlInjectionMatchSets request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateSqlInjectionMatchSetResponse -type UpdateSqlInjectionMatchSetOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueKe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKe = "KE" - // The ChangeToken that you used to submit the UpdateSqlInjectionMatchSet request. - // You can also use this value to query the status of the request. For more - // information, see GetChangeTokenStatus. 
- ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueKi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKi = "KI" -// String returns the string representation -func (s UpdateSqlInjectionMatchSetOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueKp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKp = "KP" -// GoString returns the string representation -func (s UpdateSqlInjectionMatchSetOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueKr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKr = "KR" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateSqlInjectionMatchSetOutput) SetChangeToken(v string) *UpdateSqlInjectionMatchSetOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueKw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKw = "KW" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACLRequest -type UpdateWebACLInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueKg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKg = "KG" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueLa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLa = "LA" - // A default action for the web ACL, either ALLOW or BLOCK. AWS WAF performs - // the default action if a request doesn't match the criteria in any of the - // rules in a web ACL. - DefaultAction *WafAction `type:"structure"` + // GeoMatchConstraintValueLv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLv = "LV" - // An array of updates to make to the WebACL. - // - // An array of WebACLUpdate objects that you want to insert into or delete from - // a WebACL. For more information, see the applicable data types: - // - // * WebACLUpdate: Contains Action and ActivatedRule - // - // * ActivatedRule: Contains Action, Priority, RuleId, and Type - // - // * WafAction: Contains Type - Updates []*WebACLUpdate `type:"list"` + // GeoMatchConstraintValueLb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLb = "LB" - // The WebACLId of the WebACL that you want to update. WebACLId is returned - // by CreateWebACL and by ListWebACLs. - // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueLs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLs = "LS" -// String returns the string representation -func (s UpdateWebACLInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueLr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLr = "LR" -// GoString returns the string representation -func (s UpdateWebACLInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueLy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLy = "LY" -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateWebACLInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.WebACLId == nil { - invalidParams.Add(request.NewErrParamRequired("WebACLId")) - } - if s.WebACLId != nil && len(*s.WebACLId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) - } - if s.DefaultAction != nil { - if err := s.DefaultAction.Validate(); err != nil { - invalidParams.AddNested("DefaultAction", err.(request.ErrInvalidParams)) - } - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueLi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLi = "LI" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueLt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLt = "LT" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateWebACLInput) SetChangeToken(v string) *UpdateWebACLInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueLu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLu = "LU" -// SetDefaultAction sets the DefaultAction field's value. -func (s *UpdateWebACLInput) SetDefaultAction(v *WafAction) *UpdateWebACLInput { - s.DefaultAction = v - return s -} + // GeoMatchConstraintValueMo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMo = "MO" -// SetUpdates sets the Updates field's value. -func (s *UpdateWebACLInput) SetUpdates(v []*WebACLUpdate) *UpdateWebACLInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueMk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMk = "MK" -// SetWebACLId sets the WebACLId field's value. -func (s *UpdateWebACLInput) SetWebACLId(v string) *UpdateWebACLInput { - s.WebACLId = &v - return s -} + // GeoMatchConstraintValueMg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMg = "MG" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateWebACLResponse -type UpdateWebACLOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueMw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMw = "MW" - // The ChangeToken that you used to submit the UpdateWebACL request. You can - // also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueMy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMy = "MY" -// String returns the string representation -func (s UpdateWebACLOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueMv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMv = "MV" -// GoString returns the string representation -func (s UpdateWebACLOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueMl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMl = "ML" -// SetChangeToken sets the ChangeToken field's value. 
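UpdateWebACL ties the pieces together: a default WafAction plus WebACLUpdate entries that attach rules with a priority and a per-rule action. A sketch with placeholder IDs, same client assumptions as above; note that per the WafAction doc comments later in this hunk, COUNT is not valid as a default action:

func attachRule(svc *waf.WAF, token *string) error {
	_, err := svc.UpdateWebACL(&waf.UpdateWebACLInput{
		ChangeToken: token,
		WebACLId:    aws.String("example-web-acl-id"), // placeholder
		// Allow by default; the rule below blocks on match.
		DefaultAction: &waf.WafAction{Type: aws.String(waf.WafActionTypeAllow)},
		Updates: []*waf.WebACLUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			ActivatedRule: &waf.ActivatedRule{
				Action:   &waf.WafAction{Type: aws.String(waf.WafActionTypeBlock)},
				Priority: aws.Int64(1),
				RuleId:   aws.String("example-rule-id"), // placeholder
			},
		}},
	})
	return err
}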
-func (s *UpdateWebACLOutput) SetChangeToken(v string) *UpdateWebACLOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueMt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMt = "MT" -// A request to update an XssMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSetRequest -type UpdateXssMatchSetInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueMh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMh = "MH" - // The value returned by the most recent call to GetChangeToken. - // - // ChangeToken is a required field - ChangeToken *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueMq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMq = "MQ" - // An array of XssMatchSetUpdate objects that you want to insert into or delete - // from a XssMatchSet. For more information, see the applicable data types: - // - // * XssMatchSetUpdate: Contains Action and XssMatchTuple - // - // * XssMatchTuple: Contains FieldToMatch and TextTransformation - // - // * FieldToMatch: Contains Data and Type - // - // Updates is a required field - Updates []*XssMatchSetUpdate `type:"list" required:"true"` + // GeoMatchConstraintValueMr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMr = "MR" - // The XssMatchSetId of the XssMatchSet that you want to update. XssMatchSetId - // is returned by CreateXssMatchSet and by ListXssMatchSets. - // - // XssMatchSetId is a required field - XssMatchSetId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueMu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMu = "MU" -// String returns the string representation -func (s UpdateXssMatchSetInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueYt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueYt = "YT" -// GoString returns the string representation -func (s UpdateXssMatchSetInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueMx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMx = "MX" -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateXssMatchSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateXssMatchSetInput"} - if s.ChangeToken == nil { - invalidParams.Add(request.NewErrParamRequired("ChangeToken")) - } - if s.ChangeToken != nil && len(*s.ChangeToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChangeToken", 1)) - } - if s.Updates == nil { - invalidParams.Add(request.NewErrParamRequired("Updates")) - } - if s.XssMatchSetId == nil { - invalidParams.Add(request.NewErrParamRequired("XssMatchSetId")) - } - if s.XssMatchSetId != nil && len(*s.XssMatchSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("XssMatchSetId", 1)) - } - if s.Updates != nil { - for i, v := range s.Updates { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Updates", i), err.(request.ErrInvalidParams)) - } - } - } + // GeoMatchConstraintValueFm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFm = "FM" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueMd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMd = "MD" -// SetChangeToken sets the ChangeToken field's value. 
-func (s *UpdateXssMatchSetInput) SetChangeToken(v string) *UpdateXssMatchSetInput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueMc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMc = "MC" -// SetUpdates sets the Updates field's value. -func (s *UpdateXssMatchSetInput) SetUpdates(v []*XssMatchSetUpdate) *UpdateXssMatchSetInput { - s.Updates = v - return s -} + // GeoMatchConstraintValueMn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMn = "MN" -// SetXssMatchSetId sets the XssMatchSetId field's value. -func (s *UpdateXssMatchSetInput) SetXssMatchSetId(v string) *UpdateXssMatchSetInput { - s.XssMatchSetId = &v - return s -} + // GeoMatchConstraintValueMe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMe = "ME" -// The response to an UpdateXssMatchSets request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/UpdateXssMatchSetResponse -type UpdateXssMatchSetOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueMs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMs = "MS" - // The ChangeToken that you used to submit the UpdateXssMatchSet request. You - // can also use this value to query the status of the request. For more information, - // see GetChangeTokenStatus. - ChangeToken *string `min:"1" type:"string"` -} + // GeoMatchConstraintValueMa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMa = "MA" -// String returns the string representation -func (s UpdateXssMatchSetOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueMz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMz = "MZ" + + // GeoMatchConstraintValueMm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMm = "MM" + + // GeoMatchConstraintValueNa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNa = "NA" + + // GeoMatchConstraintValueNr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNr = "NR" + + // GeoMatchConstraintValueNp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNp = "NP" + + // GeoMatchConstraintValueNl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNl = "NL" + + // GeoMatchConstraintValueNc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNc = "NC" + + // GeoMatchConstraintValueNz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNz = "NZ" + + // GeoMatchConstraintValueNi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNi = "NI" + + // GeoMatchConstraintValueNe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNe = "NE" -// GoString returns the string representation -func (s UpdateXssMatchSetOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueNg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNg = "NG" -// SetChangeToken sets the ChangeToken field's value. -func (s *UpdateXssMatchSetOutput) SetChangeToken(v string) *UpdateXssMatchSetOutput { - s.ChangeToken = &v - return s -} + // GeoMatchConstraintValueNu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNu = "NU" -// For the action that is associated with a rule in a WebACL, specifies the -// action that you want AWS WAF to perform when a web request matches all of -// the conditions in a rule. 
For the default action in a WebACL, specifies the -// action that you want AWS WAF to take when a web request doesn't match all -// of the conditions in any of the rules in a WebACL. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WafAction -type WafAction struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueNf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNf = "NF" - // Specifies how you want AWS WAF to respond to requests that match the settings - // in a Rule. Valid settings include the following: - // - // * ALLOW: AWS WAF allows requests - // - // * BLOCK: AWS WAF blocks requests - // - // * COUNT: AWS WAF increments a counter of the requests that match all of - // the conditions in the rule. AWS WAF then continues to inspect the web - // request based on the remaining rules in the web ACL. You can't specify - // COUNT for the default action for a WebACL. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"WafActionType"` -} + // GeoMatchConstraintValueMp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMp = "MP" -// String returns the string representation -func (s WafAction) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueNo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNo = "NO" -// GoString returns the string representation -func (s WafAction) GoString() string { - return s.String() -} + // GeoMatchConstraintValueOm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueOm = "OM" -// Validate inspects the fields of the type to determine if they are valid. -func (s *WafAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WafAction"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } + // GeoMatchConstraintValuePk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePk = "PK" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValuePw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePw = "PW" -// SetType sets the Type field's value. -func (s *WafAction) SetType(v string) *WafAction { - s.Type = &v - return s -} + // GeoMatchConstraintValuePs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePs = "PS" -// Contains the Rules that identify the requests that you want to allow, block, -// or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), -// and the action for each Rule that you add to a WebACL, for example, block -// requests from specified IP addresses or block requests from specified referrers. -// You also associate the WebACL with a CloudFront distribution to identify -// the requests that you want AWS WAF to filter. If you add more than one Rule -// to a WebACL, a request needs to match only one of the specifications to be -// allowed, blocked, or counted. For more information, see UpdateWebACL. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WebACL -type WebACL struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValuePa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePa = "PA" - // The action to perform if none of the Rules contained in the WebACL match. - // The action is specified by the WafAction object. 
- // - // DefaultAction is a required field - DefaultAction *WafAction `type:"structure" required:"true"` + // GeoMatchConstraintValuePg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePg = "PG" - // A friendly name or description for the metrics for this WebACL. The name - // can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't - // contain whitespace. You can't change MetricName after you create the WebACL. - MetricName *string `type:"string"` + // GeoMatchConstraintValuePy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePy = "PY" - // A friendly name or description of the WebACL. You can't change the name of - // a WebACL after you create it. - Name *string `min:"1" type:"string"` + // GeoMatchConstraintValuePe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePe = "PE" - // An array that contains the action for each Rule in a WebACL, the priority - // of the Rule, and the ID of the Rule. - // - // Rules is a required field - Rules []*ActivatedRule `type:"list" required:"true"` + // GeoMatchConstraintValuePh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePh = "PH" - // A unique identifier for a WebACL. You use WebACLId to get information about - // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete - // a WebACL from AWS WAF (see DeleteWebACL). - // - // WebACLId is returned by CreateWebACL and by ListWebACLs. - // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValuePn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePn = "PN" -// String returns the string representation -func (s WebACL) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValuePl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePl = "PL" -// GoString returns the string representation -func (s WebACL) GoString() string { - return s.String() -} + // GeoMatchConstraintValuePt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePt = "PT" -// SetDefaultAction sets the DefaultAction field's value. -func (s *WebACL) SetDefaultAction(v *WafAction) *WebACL { - s.DefaultAction = v - return s -} + // GeoMatchConstraintValuePr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePr = "PR" -// SetMetricName sets the MetricName field's value. -func (s *WebACL) SetMetricName(v string) *WebACL { - s.MetricName = &v - return s -} + // GeoMatchConstraintValueQa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueQa = "QA" -// SetName sets the Name field's value. -func (s *WebACL) SetName(v string) *WebACL { - s.Name = &v - return s -} + // GeoMatchConstraintValueRe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRe = "RE" -// SetRules sets the Rules field's value. -func (s *WebACL) SetRules(v []*ActivatedRule) *WebACL { - s.Rules = v - return s -} + // GeoMatchConstraintValueRo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRo = "RO" -// SetWebACLId sets the WebACLId field's value. -func (s *WebACL) SetWebACLId(v string) *WebACL { - s.WebACLId = &v - return s -} + // GeoMatchConstraintValueRu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRu = "RU" -// Contains the identifier and the name or description of the WebACL. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WebACLSummary -type WebACLSummary struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueRw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRw = "RW" - // A friendly name or description of the WebACL. You can't change the name of - // a WebACL after you create it. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueBl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBl = "BL" - // A unique identifier for a WebACL. You use WebACLId to get information about - // a WebACL (see GetWebACL), update a WebACL (see UpdateWebACL), and delete - // a WebACL from AWS WAF (see DeleteWebACL). - // - // WebACLId is returned by CreateWebACL and by ListWebACLs. - // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueSh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSh = "SH" -// String returns the string representation -func (s WebACLSummary) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueKn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKn = "KN" -// GoString returns the string representation -func (s WebACLSummary) GoString() string { - return s.String() -} + // GeoMatchConstraintValueLc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLc = "LC" -// SetName sets the Name field's value. -func (s *WebACLSummary) SetName(v string) *WebACLSummary { - s.Name = &v - return s -} + // GeoMatchConstraintValueMf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMf = "MF" -// SetWebACLId sets the WebACLId field's value. -func (s *WebACLSummary) SetWebACLId(v string) *WebACLSummary { - s.WebACLId = &v - return s -} + // GeoMatchConstraintValuePm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePm = "PM" -// Specifies whether to insert a Rule into or delete a Rule from a WebACL. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/WebACLUpdate -type WebACLUpdate struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueVc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVc = "VC" - // Specifies whether to insert a Rule into or delete a Rule from a WebACL. - // - // Action is a required field - Action *string `type:"string" required:"true" enum:"ChangeAction"` + // GeoMatchConstraintValueWs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueWs = "WS" - // The ActivatedRule object in an UpdateWebACL request specifies a Rule that - // you want to insert or delete, the priority of the Rule in the WebACL, and - // the action that you want AWS WAF to take when a web request matches the Rule - // (ALLOW, BLOCK, or COUNT). 
- // - // ActivatedRule is a required field - ActivatedRule *ActivatedRule `type:"structure" required:"true"` -} + // GeoMatchConstraintValueSm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSm = "SM" -// String returns the string representation -func (s WebACLUpdate) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSt = "ST" -// GoString returns the string representation -func (s WebACLUpdate) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSa = "SA" -// Validate inspects the fields of the type to determine if they are valid. -func (s *WebACLUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WebACLUpdate"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) - } - if s.ActivatedRule == nil { - invalidParams.Add(request.NewErrParamRequired("ActivatedRule")) - } - if s.ActivatedRule != nil { - if err := s.ActivatedRule.Validate(); err != nil { - invalidParams.AddNested("ActivatedRule", err.(request.ErrInvalidParams)) - } - } + // GeoMatchConstraintValueSn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSn = "SN" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueRs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRs = "RS" -// SetAction sets the Action field's value. -func (s *WebACLUpdate) SetAction(v string) *WebACLUpdate { - s.Action = &v - return s -} + // GeoMatchConstraintValueSc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSc = "SC" -// SetActivatedRule sets the ActivatedRule field's value. -func (s *WebACLUpdate) SetActivatedRule(v *ActivatedRule) *WebACLUpdate { - s.ActivatedRule = v - return s -} + // GeoMatchConstraintValueSl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSl = "SL" -// A complex type that contains XssMatchTuple objects, which specify the parts -// of web requests that you want AWS WAF to inspect for cross-site scripting -// attacks and, if you want AWS WAF to inspect a header, the name of the header. -// If a XssMatchSet contains more than one XssMatchTuple object, a request needs -// to include cross-site scripting attacks in only one of the specified parts -// of the request to be considered a match. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchSet -type XssMatchSet struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueSg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSg = "SG" - // The name, if any, of the XssMatchSet. - Name *string `min:"1" type:"string"` + // GeoMatchConstraintValueSx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSx = "SX" - // A unique identifier for an XssMatchSet. You use XssMatchSetId to get information - // about an XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), - // insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), - // and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet). - // - // XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets. 
- // - // XssMatchSetId is a required field - XssMatchSetId *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueSk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSk = "SK" - // Specifies the parts of web requests that you want to inspect for cross-site - // scripting attacks. - // - // XssMatchTuples is a required field - XssMatchTuples []*XssMatchTuple `type:"list" required:"true"` -} + // GeoMatchConstraintValueSi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSi = "SI" -// String returns the string representation -func (s XssMatchSet) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSb = "SB" -// GoString returns the string representation -func (s XssMatchSet) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSo = "SO" -// SetName sets the Name field's value. -func (s *XssMatchSet) SetName(v string) *XssMatchSet { - s.Name = &v - return s -} + // GeoMatchConstraintValueZa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueZa = "ZA" + + // GeoMatchConstraintValueGs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGs = "GS" + + // GeoMatchConstraintValueSs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSs = "SS" -// SetXssMatchSetId sets the XssMatchSetId field's value. -func (s *XssMatchSet) SetXssMatchSetId(v string) *XssMatchSet { - s.XssMatchSetId = &v - return s -} + // GeoMatchConstraintValueEs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEs = "ES" -// SetXssMatchTuples sets the XssMatchTuples field's value. -func (s *XssMatchSet) SetXssMatchTuples(v []*XssMatchTuple) *XssMatchSet { - s.XssMatchTuples = v - return s -} + // GeoMatchConstraintValueLk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLk = "LK" -// The Id and Name of an XssMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchSetSummary -type XssMatchSetSummary struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueSd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSd = "SD" - // The name of the XssMatchSet, if any, specified by Id. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueSr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSr = "SR" - // A unique identifier for an XssMatchSet. You use XssMatchSetId to get information - // about a XssMatchSet (see GetXssMatchSet), update an XssMatchSet (see UpdateXssMatchSet), - // insert an XssMatchSet into a Rule or delete one from a Rule (see UpdateRule), - // and delete an XssMatchSet from AWS WAF (see DeleteXssMatchSet). - // - // XssMatchSetId is returned by CreateXssMatchSet and by ListXssMatchSets. 
- // - // XssMatchSetId is a required field - XssMatchSetId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueSj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSj = "SJ" -// String returns the string representation -func (s XssMatchSetSummary) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSz = "SZ" -// GoString returns the string representation -func (s XssMatchSetSummary) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSe = "SE" -// SetName sets the Name field's value. -func (s *XssMatchSetSummary) SetName(v string) *XssMatchSetSummary { - s.Name = &v - return s -} + // GeoMatchConstraintValueCh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCh = "CH" -// SetXssMatchSetId sets the XssMatchSetId field's value. -func (s *XssMatchSetSummary) SetXssMatchSetId(v string) *XssMatchSetSummary { - s.XssMatchSetId = &v - return s -} + // GeoMatchConstraintValueSy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSy = "SY" -// Specifies the part of a web request that you want to inspect for cross-site -// scripting attacks and indicates whether you want to add the specification -// to an XssMatchSet or delete it from an XssMatchSet. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchSetUpdate -type XssMatchSetUpdate struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueTw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTw = "TW" - // Specify INSERT to add a XssMatchSetUpdate to an XssMatchSet. Use DELETE to - // remove a XssMatchSetUpdate from an XssMatchSet. - // - // Action is a required field - Action *string `type:"string" required:"true" enum:"ChangeAction"` + // GeoMatchConstraintValueTj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTj = "TJ" - // Specifies the part of a web request that you want AWS WAF to inspect for - // cross-site scripting attacks and, if you want AWS WAF to inspect a header, - // the name of the header. - // - // XssMatchTuple is a required field - XssMatchTuple *XssMatchTuple `type:"structure" required:"true"` -} + // GeoMatchConstraintValueTz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTz = "TZ" -// String returns the string representation -func (s XssMatchSetUpdate) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueTh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTh = "TH" -// GoString returns the string representation -func (s XssMatchSetUpdate) GoString() string { - return s.String() -} + // GeoMatchConstraintValueTl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTl = "TL" -// Validate inspects the fields of the type to determine if they are valid. 
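The XssMatchSetUpdate/XssMatchTuple pair mirrors the SqlInjectionMatchSet types earlier in this hunk, so one sketch covers the shape of both (set ID is a placeholder, client and imports as in the earlier examples):

func addXssTuple(svc *waf.WAF, token *string) error {
	_, err := svc.UpdateXssMatchSet(&waf.UpdateXssMatchSetInput{
		ChangeToken:   token,
		XssMatchSetId: aws.String("example-xss-set-id"), // placeholder
		Updates: []*waf.XssMatchSetUpdate{{
			Action: aws.String(waf.ChangeActionInsert),
			// Inspect the query string, URL-decoding it first so
			// encoded payloads are normalized before matching.
			XssMatchTuple: &waf.XssMatchTuple{
				FieldToMatch:       &waf.FieldToMatch{Type: aws.String(waf.MatchFieldTypeQueryString)},
				TextTransformation: aws.String(waf.TextTransformationUrlDecode),
			},
		}},
	})
	return err
}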
-func (s *XssMatchSetUpdate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "XssMatchSetUpdate"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) - } - if s.XssMatchTuple == nil { - invalidParams.Add(request.NewErrParamRequired("XssMatchTuple")) - } - if s.XssMatchTuple != nil { - if err := s.XssMatchTuple.Validate(); err != nil { - invalidParams.AddNested("XssMatchTuple", err.(request.ErrInvalidParams)) - } - } + // GeoMatchConstraintValueTg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTg = "TG" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueTk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTk = "TK" -// SetAction sets the Action field's value. -func (s *XssMatchSetUpdate) SetAction(v string) *XssMatchSetUpdate { - s.Action = &v - return s -} + // GeoMatchConstraintValueTo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTo = "TO" -// SetXssMatchTuple sets the XssMatchTuple field's value. -func (s *XssMatchSetUpdate) SetXssMatchTuple(v *XssMatchTuple) *XssMatchSetUpdate { - s.XssMatchTuple = v - return s -} + // GeoMatchConstraintValueTt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTt = "TT" -// Specifies the part of a web request that you want AWS WAF to inspect for -// cross-site scripting attacks and, if you want AWS WAF to inspect a header, -// the name of the header. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/XssMatchTuple -type XssMatchTuple struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueTn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTn = "TN" - // Specifies where in a web request to look for cross-site scripting attacks. - // - // FieldToMatch is a required field - FieldToMatch *FieldToMatch `type:"structure" required:"true"` + // GeoMatchConstraintValueTr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTr = "TR" - // Text transformations eliminate some of the unusual formatting that attackers - // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, - // AWS WAF performs the transformation on FieldToMatch before inspecting a request - // for a match. - // - // CMD_LINE - // - // When you're concerned that attackers are injecting an operating system commandline - // command and using unusual formatting to disguise some or all of the command, - // use this option to perform the following transformations: - // - // * Delete the following characters: \ " ' ^ - // - // * Delete spaces before the following characters: / ( - // - // * Replace the following characters with a space: , ; - // - // * Replace multiple spaces with one space - // - // * Convert uppercase letters (A-Z) to lowercase (a-z) - // - // COMPRESS_WHITE_SPACE - // - // Use this option to replace the following characters with a space character - // (decimal 32): - // - // * \f, formfeed, decimal 12 - // - // * \t, tab, decimal 9 - // - // * \n, newline, decimal 10 - // - // * \r, carriage return, decimal 13 - // - // * \v, vertical tab, decimal 11 - // - // * non-breaking space, decimal 160 - // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. - // - // HTML_ENTITY_DECODE - // - // Use this option to replace HTML-encoded characters with unencoded characters. 
- // HTML_ENTITY_DECODE performs the following operations: - // - // * Replaces (ampersand)quot; with " - // - // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 - // - // * Replaces (ampersand)lt; with a "less than" symbol - // - // * Replaces (ampersand)gt; with > - // - // * Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh;, - // with the corresponding characters - // - // * Replaces characters that are represented in decimal format, (ampersand)#nnnn;, - // with the corresponding characters - // - // LOWERCASE - // - // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). - // - // URL_DECODE - // - // Use this option to decode a URL-encoded value. - // - // NONE - // - // Specify NONE if you don't want to perform any text transformations. - // - // TextTransformation is a required field - TextTransformation *string `type:"string" required:"true" enum:"TextTransformation"` -} + // GeoMatchConstraintValueTm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTm = "TM" -// String returns the string representation -func (s XssMatchTuple) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueTc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTc = "TC" -// GoString returns the string representation -func (s XssMatchTuple) GoString() string { - return s.String() -} + // GeoMatchConstraintValueTv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTv = "TV" -// Validate inspects the fields of the type to determine if they are valid. -func (s *XssMatchTuple) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "XssMatchTuple"} - if s.FieldToMatch == nil { - invalidParams.Add(request.NewErrParamRequired("FieldToMatch")) - } - if s.TextTransformation == nil { - invalidParams.Add(request.NewErrParamRequired("TextTransformation")) - } - if s.FieldToMatch != nil { - if err := s.FieldToMatch.Validate(); err != nil { - invalidParams.AddNested("FieldToMatch", err.(request.ErrInvalidParams)) - } - } + // GeoMatchConstraintValueUg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUg = "UG" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueUa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUa = "UA" -// SetFieldToMatch sets the FieldToMatch field's value. -func (s *XssMatchTuple) SetFieldToMatch(v *FieldToMatch) *XssMatchTuple { - s.FieldToMatch = v - return s -} + // GeoMatchConstraintValueAe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAe = "AE" -// SetTextTransformation sets the TextTransformation field's value. 
-func (s *XssMatchTuple) SetTextTransformation(v string) *XssMatchTuple { - s.TextTransformation = &v - return s -} + // GeoMatchConstraintValueGb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGb = "GB" -const ( - // ChangeActionInsert is a ChangeAction enum value - ChangeActionInsert = "INSERT" + // GeoMatchConstraintValueUs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUs = "US" - // ChangeActionDelete is a ChangeAction enum value - ChangeActionDelete = "DELETE" -) + // GeoMatchConstraintValueUm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUm = "UM" -const ( - // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value - ChangeTokenStatusProvisioned = "PROVISIONED" + // GeoMatchConstraintValueUy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUy = "UY" - // ChangeTokenStatusPending is a ChangeTokenStatus enum value - ChangeTokenStatusPending = "PENDING" + // GeoMatchConstraintValueUz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUz = "UZ" - // ChangeTokenStatusInsync is a ChangeTokenStatus enum value - ChangeTokenStatusInsync = "INSYNC" -) + // GeoMatchConstraintValueVu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVu = "VU" -const ( - // ComparisonOperatorEq is a ComparisonOperator enum value - ComparisonOperatorEq = "EQ" + // GeoMatchConstraintValueVe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVe = "VE" - // ComparisonOperatorNe is a ComparisonOperator enum value - ComparisonOperatorNe = "NE" + // GeoMatchConstraintValueVn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVn = "VN" - // ComparisonOperatorLe is a ComparisonOperator enum value - ComparisonOperatorLe = "LE" + // GeoMatchConstraintValueVg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVg = "VG" - // ComparisonOperatorLt is a ComparisonOperator enum value - ComparisonOperatorLt = "LT" + // GeoMatchConstraintValueVi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVi = "VI" - // ComparisonOperatorGe is a ComparisonOperator enum value - ComparisonOperatorGe = "GE" + // GeoMatchConstraintValueWf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueWf = "WF" - // ComparisonOperatorGt is a ComparisonOperator enum value - ComparisonOperatorGt = "GT" + // GeoMatchConstraintValueEh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEh = "EH" + + // GeoMatchConstraintValueYe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueYe = "YE" + + // GeoMatchConstraintValueZm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueZm = "ZM" + + // GeoMatchConstraintValueZw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueZw = "ZW" ) const ( @@ -12622,6 +17256,12 @@ const ( // ParameterExceptionFieldSizeConstraintComparisonOperator is a ParameterExceptionField enum value ParameterExceptionFieldSizeConstraintComparisonOperator = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" + // ParameterExceptionFieldGeoMatchLocationType is a ParameterExceptionField enum value + ParameterExceptionFieldGeoMatchLocationType = "GEO_MATCH_LOCATION_TYPE" + + // ParameterExceptionFieldGeoMatchLocationValue is a ParameterExceptionField enum value + ParameterExceptionFieldGeoMatchLocationValue = "GEO_MATCH_LOCATION_VALUE" + // ParameterExceptionFieldRateKey is a ParameterExceptionField enum value ParameterExceptionFieldRateKey = "RATE_KEY" @@ -12667,11 +17307,17 @@ const ( // PredicateTypeSqlInjectionMatch is a PredicateType 
enum value
 PredicateTypeSqlInjectionMatch = "SqlInjectionMatch"

+ // PredicateTypeGeoMatch is a PredicateType enum value
+ PredicateTypeGeoMatch = "GeoMatch"
+
 // PredicateTypeSizeConstraint is a PredicateType enum value
 PredicateTypeSizeConstraint = "SizeConstraint"

 // PredicateTypeXssMatch is a PredicateType enum value
 PredicateTypeXssMatch = "XssMatch"
+
+ // PredicateTypeRegexMatch is a PredicateType enum value
+ PredicateTypeRegexMatch = "RegexMatch"
)

const (
diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go b/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go
index 67a17577f63..83f4870dccb 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go
@@ -71,7 +71,7 @@ const (
 // BLOCK, or COUNT.
 //
 //    * You tried to update a ByteMatchSet with a FieldToMatchType other than
-//    HEADER, QUERY_STRING, or URI.
+//    HEADER, METHOD, QUERY_STRING, URI, or BODY.
 //
 //    * You tried to update a ByteMatchSet with a Field of HEADER but no value
 //    for Data.
@@ -80,6 +80,12 @@ const (
 // a resource with which a web ACL cannot be associated.
 ErrCodeInvalidParameterException = "InvalidParameterException"

+ // ErrCodeInvalidRegexPatternException for service response error code
+ // "InvalidRegexPatternException".
+ //
+ // The regular expression (regex) you specified in RegexPatternString is invalid.
+ ErrCodeInvalidRegexPatternException = "InvalidRegexPatternException"
+
 // ErrCodeLimitsExceededException for service response error code
 // "LimitsExceededException".
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go
index dcf329a0aca..4e966dd9e7b 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go
@@ -92,7 +92,7 @@ func (c *WAFRegional) AssociateWebACLRequest(input *AssociateWebACLInput) (req *
 // BLOCK, or COUNT.
 //
 //    * You tried to update a ByteMatchSet with a FieldToMatchType other than
-//    HEADER, QUERY_STRING, or URI.
+//    HEADER, METHOD, QUERY_STRING, URI, or BODY.
 //
 //    * You tried to update a ByteMatchSet with a Field of HEADER but no value
 //    for Data.
@@ -236,7 +236,7 @@ func (c *WAFRegional) CreateByteMatchSetRequest(input *waf.CreateByteMatchSetInp
 // BLOCK, or COUNT.
 //
 //    * You tried to update a ByteMatchSet with a FieldToMatchType other than
-//    HEADER, QUERY_STRING, or URI.
+//    HEADER, METHOD, QUERY_STRING, URI, or BODY.
 //
 //    * You tried to update a ByteMatchSet with a Field of HEADER but no value
 //    for Data.
@@ -276,6 +276,152 @@ func (c *WAFRegional) CreateByteMatchSetWithContext(ctx aws.Context, input *waf.
 return out, req.Send()
}

+const opCreateGeoMatchSet = "CreateGeoMatchSet"
+
+// CreateGeoMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the CreateGeoMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateGeoMatchSet for more information on using the CreateGeoMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
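//
// [Editorial sketch, not generated SDK documentation] One concrete form of
// the request-lifecycle customization mentioned above is adding a custom
// header to the underlying HTTP request before calling Send. The header name
// is a made-up example; "client" and "params" are assumed to exist, as in the
// generated example below.
//
//    req, resp := client.CreateGeoMatchSetRequest(params)
//    req.HTTPRequest.Header.Set("X-Example-Audit-Id", "12345")
//    if err := req.Send(); err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//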
+//
+//
+//    // Example sending a request using the CreateGeoMatchSetRequest method.
+//    req, resp := client.CreateGeoMatchSetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateGeoMatchSet
+func (c *WAFRegional) CreateGeoMatchSetRequest(input *waf.CreateGeoMatchSetInput) (req *request.Request, output *waf.CreateGeoMatchSetOutput) {
+ op := &request.Operation{
+ Name: opCreateGeoMatchSet,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &waf.CreateGeoMatchSetInput{}
+ }
+
+ output = &waf.CreateGeoMatchSetOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateGeoMatchSet API operation for AWS WAF Regional.
+//
+// Creates a GeoMatchSet, which you use to specify which web requests you want
+// to allow or block based on the country that the requests originate from.
+// For example, if you're receiving a lot of requests from one or more countries
+// and you want to block the requests, you can create a GeoMatchSet that contains
+// those countries and then configure AWS WAF to block the requests.
+//
+// To create and configure a GeoMatchSet, perform the following steps:
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of a CreateGeoMatchSet request.
+//
+// Submit a CreateGeoMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateGeoMatchSet request.
+//
+// Submit an UpdateGeoMatchSet request to specify the countries that you
+// want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation CreateGeoMatchSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeWAFStaleDataException "WAFStaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException"
+// The name specified is invalid.
+//
+// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException"
+// The operation failed because AWS WAF didn't recognize a parameter in the
+// request. For example:
+//
+// * You specified an invalid parameter name.
+//
+// * You specified an invalid value.
+//
+// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL)
+// using an action other than INSERT or DELETE.
+//
+// * You tried to create a WebACL with a DefaultActionType other than ALLOW,
+// BLOCK, or COUNT.
+//
+// * You tried to create a RateBasedRule with a RateKey value other than
+// IP.
+// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateGeoMatchSet +func (c *WAFRegional) CreateGeoMatchSet(input *waf.CreateGeoMatchSetInput) (*waf.CreateGeoMatchSetOutput, error) { + req, out := c.CreateGeoMatchSetRequest(input) + return out, req.Send() +} + +// CreateGeoMatchSetWithContext is the same as CreateGeoMatchSet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGeoMatchSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateGeoMatchSetWithContext(ctx aws.Context, input *waf.CreateGeoMatchSetInput, opts ...request.Option) (*waf.CreateGeoMatchSetOutput, error) { + req, out := c.CreateGeoMatchSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateIPSet = "CreateIPSet" // CreateIPSetRequest generates a "aws/request.Request" representing the @@ -387,7 +533,7 @@ func (c *WAFRegional) CreateIPSetRequest(input *waf.CreateIPSetInput) (req *requ // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -569,7 +715,7 @@ func (c *WAFRegional) CreateRateBasedRuleRequest(input *waf.CreateRateBasedRuleI // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -605,83 +751,70 @@ func (c *WAFRegional) CreateRateBasedRuleWithContext(ctx aws.Context, input *waf return out, req.Send() } -const opCreateRule = "CreateRule" +const opCreateRegexMatchSet = "CreateRegexMatchSet" -// CreateRuleRequest generates a "aws/request.Request" representing the -// client's request for the CreateRule operation. The "output" return +// CreateRegexMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateRegexMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
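//
// [Editorial sketch, not generated SDK documentation] The CreateGeoMatchSet
// change-token workflow described above, end to end. "svc" is an assumed,
// already-configured *WAFRegional client; the set name and country code are
// illustrative only.
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil { /* handle error */ }
//
//    created, err := svc.CreateGeoMatchSet(&waf.CreateGeoMatchSetInput{
//        ChangeToken: token.ChangeToken,
//        Name:        aws.String("example-geo-set"),
//    })
//    if err != nil { /* handle error */ }
//
//    // A fresh change token is required for the update step.
//    token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil { /* handle error */ }
//
//    _, err = svc.UpdateGeoMatchSet(&waf.UpdateGeoMatchSetInput{
//        ChangeToken:   token.ChangeToken,
//        GeoMatchSetId: created.GeoMatchSet.GeoMatchSetId,
//        Updates: []*waf.GeoMatchSetUpdate{{
//            Action: aws.String(waf.ChangeActionInsert),
//            GeoMatchConstraint: &waf.GeoMatchConstraint{
//                Type:  aws.String(waf.GeoMatchConstraintTypeCountry),
//                Value: aws.String(waf.GeoMatchConstraintValueUs),
//            },
//        }},
//    })
//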
// -// See CreateRule for more information on using the CreateRule +// See CreateRegexMatchSet for more information on using the CreateRegexMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateRuleRequest method. -// req, resp := client.CreateRuleRequest(params) +// // Example sending a request using the CreateRegexMatchSetRequest method. +// req, resp := client.CreateRegexMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule -func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *request.Request, output *waf.CreateRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRegexMatchSet +func (c *WAFRegional) CreateRegexMatchSetRequest(input *waf.CreateRegexMatchSetInput) (req *request.Request, output *waf.CreateRegexMatchSetOutput) { op := &request.Operation{ - Name: opCreateRule, + Name: opCreateRegexMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.CreateRuleInput{} + input = &waf.CreateRegexMatchSetInput{} } - output = &waf.CreateRuleOutput{} + output = &waf.CreateRegexMatchSetOutput{} req = c.newRequest(op, input, output) return } -// CreateRule API operation for AWS WAF Regional. -// -// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and -// other predicates that identify the requests that you want to block. If you -// add more than one predicate to a Rule, a request must match all of the specifications -// to be allowed or blocked. For example, suppose you add the following to a -// Rule: -// -// * An IPSet that matches the IP address 192.0.2.44/32 -// -// * A ByteMatchSet that matches BadBot in the User-Agent header +// CreateRegexMatchSet API operation for AWS WAF Regional. // -// You then add the Rule to a WebACL and specify that you want to blocks requests -// that satisfy the Rule. For a request to be blocked, it must come from the -// IP address 192.0.2.44 and the User-Agent header in the request must contain -// the value BadBot. +// Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the +// part of a web request that you want AWS WAF to inspect, such as the values +// of the User-Agent header or the query string. For example, you can create +// a RegexMatchSet that contains a RegexMatchTuple that looks for any requests +// with User-Agent headers that match a RegexPatternSet with pattern B[a@]dB[o0]t. +// You can then configure AWS WAF to reject those requests. // -// To create and configure a Rule, perform the following steps: -// -// Create and update the predicates that you want to include in the Rule. For -// more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. +// To create and configure a RegexMatchSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateRule request. +// parameter of a CreateRegexMatchSet request. // -// Submit a CreateRule request. +// Submit a CreateRegexMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateRule request. 
-// -// Submit an UpdateRule request to specify the predicates that you want to include -// in the Rule. +// parameter of an UpdateRegexMatchSet request. // -// Create and update a WebACL that contains the Rule. For more information, -// see CreateWebACL. +// Submit an UpdateRegexMatchSet request to specify the part of the request +// that you want AWS WAF to inspect (for example, the header or the URI) and +// the value, using a RegexPatternSet, that you want AWS WAF to watch for. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -691,7 +824,7 @@ func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *reques // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation CreateRule for usage and error information. +// API operation CreateRegexMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFStaleDataException "WAFStaleDataException" @@ -705,127 +838,94 @@ func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *reques // * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" // The name specified is invalid. // -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// // * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of // WebACL objects that you can create for an AWS account. For more information, // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule -func (c *WAFRegional) CreateRule(input *waf.CreateRuleInput) (*waf.CreateRuleOutput, error) { - req, out := c.CreateRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRegexMatchSet +func (c *WAFRegional) CreateRegexMatchSet(input *waf.CreateRegexMatchSetInput) (*waf.CreateRegexMatchSetOutput, error) { + req, out := c.CreateRegexMatchSetRequest(input) return out, req.Send() } -// CreateRuleWithContext is the same as CreateRule with the addition of +// CreateRegexMatchSetWithContext is the same as CreateRegexMatchSet with the addition of // the ability to pass a context and additional request options. // -// See CreateRule for details on how to use this API operation. 
+// See CreateRegexMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) CreateRuleWithContext(ctx aws.Context, input *waf.CreateRuleInput, opts ...request.Option) (*waf.CreateRuleOutput, error) { - req, out := c.CreateRuleRequest(input) +func (c *WAFRegional) CreateRegexMatchSetWithContext(ctx aws.Context, input *waf.CreateRegexMatchSetInput, opts ...request.Option) (*waf.CreateRegexMatchSetOutput, error) { + req, out := c.CreateRegexMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateSizeConstraintSet = "CreateSizeConstraintSet" +const opCreateRegexPatternSet = "CreateRegexPatternSet" -// CreateSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateSizeConstraintSet operation. The "output" return +// CreateRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateRegexPatternSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateSizeConstraintSet for more information on using the CreateSizeConstraintSet +// See CreateRegexPatternSet for more information on using the CreateRegexPatternSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateSizeConstraintSetRequest method. -// req, resp := client.CreateSizeConstraintSetRequest(params) +// // Example sending a request using the CreateRegexPatternSetRequest method. +// req, resp := client.CreateRegexPatternSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSizeConstraintSet -func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstraintSetInput) (req *request.Request, output *waf.CreateSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRegexPatternSet +func (c *WAFRegional) CreateRegexPatternSetRequest(input *waf.CreateRegexPatternSetInput) (req *request.Request, output *waf.CreateRegexPatternSetOutput) { op := &request.Operation{ - Name: opCreateSizeConstraintSet, + Name: opCreateRegexPatternSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.CreateSizeConstraintSetInput{} + input = &waf.CreateRegexPatternSetInput{} } - output = &waf.CreateSizeConstraintSetOutput{} + output = &waf.CreateRegexPatternSetOutput{} req = c.newRequest(op, input, output) return } -// CreateSizeConstraintSet API operation for AWS WAF Regional. +// CreateRegexPatternSet API operation for AWS WAF Regional. // -// Creates a SizeConstraintSet. 
You then use UpdateSizeConstraintSet to identify -// the part of a web request that you want AWS WAF to check for length, such -// as the length of the User-Agent header or the length of the query string. -// For example, you can create a SizeConstraintSet that matches any requests -// that have a query string that is longer than 100 bytes. You can then configure -// AWS WAF to reject those requests. +// Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify +// the regular expression (regex) pattern that you want AWS WAF to search for, +// such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests. // -// To create and configure a SizeConstraintSet, perform the following steps: +// To create and configure a RegexPatternSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateSizeConstraintSet request. +// parameter of a CreateRegexPatternSet request. // -// Submit a CreateSizeConstraintSet request. +// Submit a CreateRegexPatternSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSizeConstraintSet request. +// parameter of an UpdateRegexPatternSet request. // -// Submit an UpdateSizeConstraintSet request to specify the part of the request -// that you want AWS WAF to inspect (for example, the header or the URI) and -// the value that you want AWS WAF to watch for. +// Submit an UpdateRegexPatternSet request to specify the string that you want +// AWS WAF to watch for. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -835,7 +935,7 @@ func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstr // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation CreateSizeConstraintSet for usage and error information. +// API operation CreateRegexPatternSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFStaleDataException "WAFStaleDataException" @@ -846,130 +946,114 @@ func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstr // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. -// // * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" // The name specified is invalid. // -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. 
-// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// // * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of // WebACL objects that you can create for an AWS account. For more information, // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSizeConstraintSet -func (c *WAFRegional) CreateSizeConstraintSet(input *waf.CreateSizeConstraintSetInput) (*waf.CreateSizeConstraintSetOutput, error) { - req, out := c.CreateSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRegexPatternSet +func (c *WAFRegional) CreateRegexPatternSet(input *waf.CreateRegexPatternSetInput) (*waf.CreateRegexPatternSetOutput, error) { + req, out := c.CreateRegexPatternSetRequest(input) return out, req.Send() } -// CreateSizeConstraintSetWithContext is the same as CreateSizeConstraintSet with the addition of +// CreateRegexPatternSetWithContext is the same as CreateRegexPatternSet with the addition of // the ability to pass a context and additional request options. // -// See CreateSizeConstraintSet for details on how to use this API operation. +// See CreateRegexPatternSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) CreateSizeConstraintSetWithContext(ctx aws.Context, input *waf.CreateSizeConstraintSetInput, opts ...request.Option) (*waf.CreateSizeConstraintSetOutput, error) { - req, out := c.CreateSizeConstraintSetRequest(input) +func (c *WAFRegional) CreateRegexPatternSetWithContext(ctx aws.Context, input *waf.CreateRegexPatternSetInput, opts ...request.Option) (*waf.CreateRegexPatternSetOutput, error) { + req, out := c.CreateRegexPatternSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" +const opCreateRule = "CreateRule" -// CreateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateSqlInjectionMatchSet operation. The "output" return +// CreateRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateSqlInjectionMatchSet for more information on using the CreateSqlInjectionMatchSet +// See CreateRule for more information on using the CreateRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateSqlInjectionMatchSetRequest method. 
-//    req, resp := client.CreateSqlInjectionMatchSetRequest(params)
+//    // Example sending a request using the CreateRuleRequest method.
+//    req, resp := client.CreateRuleRequest(params)
 //
 //    err := req.Send()
 //    if err == nil { // resp is now filled
 //        fmt.Println(resp)
 //    }
 //
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSqlInjectionMatchSet
-func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInjectionMatchSetInput) (req *request.Request, output *waf.CreateSqlInjectionMatchSetOutput) {
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule
+func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *request.Request, output *waf.CreateRuleOutput) {
 op := &request.Operation{
- Name: opCreateSqlInjectionMatchSet,
+ Name: opCreateRule,
 HTTPMethod: "POST",
 HTTPPath: "/",
 }

 if input == nil {
- input = &waf.CreateSqlInjectionMatchSetInput{}
+ input = &waf.CreateRuleInput{}
 }

- output = &waf.CreateSqlInjectionMatchSetOutput{}
+ output = &waf.CreateRuleOutput{}
 req = c.newRequest(op, input, output)
 return
}

-// CreateSqlInjectionMatchSet API operation for AWS WAF Regional.
+// CreateRule API operation for AWS WAF Regional.
 //
-// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests
-// that contain snippets of SQL code in a specified part of web requests. AWS
-// WAF searches for character sequences that are likely to be malicious strings.
+// Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and
+// other predicates that identify the requests that you want to block. If you
+// add more than one predicate to a Rule, a request must match all of the specifications
+// to be allowed or blocked. For example, suppose you add the following to a
+// Rule:
 //
-// To create and configure a SqlInjectionMatchSet, perform the following steps:
+//    * An IPSet that matches the IP address 192.0.2.44/32
+//
+//    * A ByteMatchSet that matches BadBot in the User-Agent header
+//
+// You then add the Rule to a WebACL and specify that you want to block requests
+// that satisfy the Rule. For a request to be blocked, it must come from the
+// IP address 192.0.2.44 and the User-Agent header in the request must contain
+// the value BadBot.
+//
+// To create and configure a Rule, perform the following steps:
+//
+// Create and update the predicates that you want to include in the Rule. For
+// more information, see CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet.
 //
 // Use GetChangeToken to get the change token that you provide in the ChangeToken
-// parameter of a CreateSqlInjectionMatchSet request.
+// parameter of a CreateRule request.
 //
-// Submit a CreateSqlInjectionMatchSet request.
+// Submit a CreateRule request.
 //
 // Use GetChangeToken to get the change token that you provide in the ChangeToken
-// parameter of an UpdateSqlInjectionMatchSet request.
+// parameter of an UpdateRule request.
 //
-// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web
-// requests in which you want to allow, block, or count malicious SQL code.
+// Submit an UpdateRule request to specify the predicates that you want to include
+// in the Rule.
+//
+// Create and update a WebACL that contains the Rule. For more information,
+// see CreateWebACL.
 //
 // For more information about how to use the AWS WAF API to allow or block HTTP
 // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
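//
// [Editorial sketch, not generated SDK documentation] The CreateRule workflow
// described above, using the GeoMatch predicate type this change introduces.
// "svc" is an assumed *WAFRegional client and "geoMatchSetID" an assumed
// GeoMatchSet ID returned by an earlier CreateGeoMatchSet call; names are
// illustrative.
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil { /* handle error */ }
//
//    rule, err := svc.CreateRule(&waf.CreateRuleInput{
//        ChangeToken: token.ChangeToken,
//        Name:        aws.String("example-geo-rule"),
//        MetricName:  aws.String("ExampleGeoRule"),
//    })
//    if err != nil { /* handle error */ }
//
//    token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil { /* handle error */ }
//
//    _, err = svc.UpdateRule(&waf.UpdateRuleInput{
//        ChangeToken: token.ChangeToken,
//        RuleId:      rule.Rule.RuleId,
//        Updates: []*waf.RuleUpdate{{
//            Action: aws.String(waf.ChangeActionInsert),
//            Predicate: &waf.Predicate{
//                DataId:  aws.String(geoMatchSetID),
//                Negated: aws.Bool(false),
//                Type:    aws.String(waf.PredicateTypeGeoMatch),
//            },
//        }},
//    })
//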
@@ -979,19 +1063,19 @@ func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInje // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation CreateSqlInjectionMatchSet for usage and error information. +// API operation CreateRule for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" -// The name specified is invalid. +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. // // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. // // * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" // The operation failed because AWS WAF didn't recognize a parameter in the @@ -1014,7 +1098,7 @@ func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInje // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -1022,120 +1106,108 @@ func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInje // * Your request references an ARN that is malformed, or corresponds to // a resource with which a web ACL cannot be associated. // -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" // The operation exceeds a resource limit, for example, the maximum number of // WebACL objects that you can create for an AWS account. For more information, // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSqlInjectionMatchSet -func (c *WAFRegional) CreateSqlInjectionMatchSet(input *waf.CreateSqlInjectionMatchSetInput) (*waf.CreateSqlInjectionMatchSetOutput, error) { - req, out := c.CreateSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateRule +func (c *WAFRegional) CreateRule(input *waf.CreateRuleInput) (*waf.CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) return out, req.Send() } -// CreateSqlInjectionMatchSetWithContext is the same as CreateSqlInjectionMatchSet with the addition of +// CreateRuleWithContext is the same as CreateRule with the addition of // the ability to pass a context and additional request options. // -// See CreateSqlInjectionMatchSet for details on how to use this API operation. +// See CreateRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) CreateSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.CreateSqlInjectionMatchSetInput, opts ...request.Option) (*waf.CreateSqlInjectionMatchSetOutput, error) { - req, out := c.CreateSqlInjectionMatchSetRequest(input) +func (c *WAFRegional) CreateRuleWithContext(ctx aws.Context, input *waf.CreateRuleInput, opts ...request.Option) (*waf.CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateWebACL = "CreateWebACL" +const opCreateSizeConstraintSet = "CreateSizeConstraintSet" -// CreateWebACLRequest generates a "aws/request.Request" representing the -// client's request for the CreateWebACL operation. The "output" return +// CreateSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSizeConstraintSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateWebACL for more information on using the CreateWebACL +// See CreateSizeConstraintSet for more information on using the CreateSizeConstraintSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateWebACLRequest method. -// req, resp := client.CreateWebACLRequest(params) +// // Example sending a request using the CreateSizeConstraintSetRequest method. +// req, resp := client.CreateSizeConstraintSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL -func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *request.Request, output *waf.CreateWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSizeConstraintSet +func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstraintSetInput) (req *request.Request, output *waf.CreateSizeConstraintSetOutput) { op := &request.Operation{ - Name: opCreateWebACL, + Name: opCreateSizeConstraintSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.CreateWebACLInput{} + input = &waf.CreateSizeConstraintSetInput{} } - output = &waf.CreateWebACLOutput{} + output = &waf.CreateSizeConstraintSetOutput{} req = c.newRequest(op, input, output) return } -// CreateWebACL API operation for AWS WAF Regional. -// -// Creates a WebACL, which contains the Rules that identify the CloudFront web -// requests that you want to allow, block, or count. AWS WAF evaluates Rules -// in order based on the value of Priority for each Rule. -// -// You also specify a default action, either ALLOW or BLOCK. If a web request -// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request -// with the default action. -// -// To create and configure a WebACL, perform the following steps: +// CreateSizeConstraintSet API operation for AWS WAF Regional. 
// -// Create and update the ByteMatchSet objects and other predicates that you -// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, -// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify +// the part of a web request that you want AWS WAF to check for length, such +// as the length of the User-Agent header or the length of the query string. +// For example, you can create a SizeConstraintSet that matches any requests +// that have a query string that is longer than 100 bytes. You can then configure +// AWS WAF to reject those requests. // -// Create and update the Rules that you want to include in the WebACL. For more -// information, see CreateRule and UpdateRule. +// To create and configure a SizeConstraintSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateWebACL request. +// parameter of a CreateSizeConstraintSet request. // -// Submit a CreateWebACL request. +// Submit a CreateSizeConstraintSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateWebACL request. +// parameter of an UpdateSizeConstraintSet request. // -// Submit an UpdateWebACL request to specify the Rules that you want to include -// in the WebACL, to specify the default action, and to associate the WebACL -// with a CloudFront distribution. +// Submit an UpdateSizeConstraintSet request to specify the part of the request +// that you want AWS WAF to inspect (for example, the header or the URI) and +// the value that you want AWS WAF to watch for. // -// For more information about how to use the AWS WAF API, see the AWS WAF Developer -// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation CreateWebACL for usage and error information. +// API operation CreateSizeConstraintSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFStaleDataException "WAFStaleDataException" @@ -1174,7 +1246,7 @@ func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *re // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -1188,89 +1260,88 @@ func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *re // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. 
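//
// [Editorial sketch, not generated SDK documentation] The SizeConstraintSet
// workflow described above, expressing the 100-byte query-string example from
// the text. "svc" is an assumed *WAFRegional client; the set name is
// illustrative.
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil { /* handle error */ }
//
//    set, err := svc.CreateSizeConstraintSet(&waf.CreateSizeConstraintSetInput{
//        ChangeToken: token.ChangeToken,
//        Name:        aws.String("example-size-set"),
//    })
//    if err != nil { /* handle error */ }
//
//    token, err = svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil { /* handle error */ }
//
//    _, err = svc.UpdateSizeConstraintSet(&waf.UpdateSizeConstraintSetInput{
//        ChangeToken:         token.ChangeToken,
//        SizeConstraintSetId: set.SizeConstraintSet.SizeConstraintSetId,
//        Updates: []*waf.SizeConstraintSetUpdate{{
//            Action: aws.String(waf.ChangeActionInsert),
//            SizeConstraint: &waf.SizeConstraint{
//                ComparisonOperator: aws.String(waf.ComparisonOperatorGt),
//                FieldToMatch:       &waf.FieldToMatch{Type: aws.String(waf.MatchFieldTypeQueryString)},
//                Size:               aws.Int64(100),
//                TextTransformation: aws.String(waf.TextTransformationNone),
//            },
//        }},
//    })
//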
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL -func (c *WAFRegional) CreateWebACL(input *waf.CreateWebACLInput) (*waf.CreateWebACLOutput, error) { - req, out := c.CreateWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSizeConstraintSet +func (c *WAFRegional) CreateSizeConstraintSet(input *waf.CreateSizeConstraintSetInput) (*waf.CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) return out, req.Send() } -// CreateWebACLWithContext is the same as CreateWebACL with the addition of +// CreateSizeConstraintSetWithContext is the same as CreateSizeConstraintSet with the addition of // the ability to pass a context and additional request options. // -// See CreateWebACL for details on how to use this API operation. +// See CreateSizeConstraintSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) CreateWebACLWithContext(ctx aws.Context, input *waf.CreateWebACLInput, opts ...request.Option) (*waf.CreateWebACLOutput, error) { - req, out := c.CreateWebACLRequest(input) +func (c *WAFRegional) CreateSizeConstraintSetWithContext(ctx aws.Context, input *waf.CreateSizeConstraintSetInput, opts ...request.Option) (*waf.CreateSizeConstraintSetOutput, error) { + req, out := c.CreateSizeConstraintSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateXssMatchSet = "CreateXssMatchSet" +const opCreateSqlInjectionMatchSet = "CreateSqlInjectionMatchSet" -// CreateXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the CreateXssMatchSet operation. The "output" return +// CreateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSqlInjectionMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateXssMatchSet for more information on using the CreateXssMatchSet +// See CreateSqlInjectionMatchSet for more information on using the CreateSqlInjectionMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateXssMatchSetRequest method. -// req, resp := client.CreateXssMatchSetRequest(params) +// // Example sending a request using the CreateSqlInjectionMatchSetRequest method. 
+// req, resp := client.CreateSqlInjectionMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateXssMatchSet -func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput) (req *request.Request, output *waf.CreateXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSqlInjectionMatchSet +func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInjectionMatchSetInput) (req *request.Request, output *waf.CreateSqlInjectionMatchSetOutput) { op := &request.Operation{ - Name: opCreateXssMatchSet, + Name: opCreateSqlInjectionMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.CreateXssMatchSetInput{} + input = &waf.CreateSqlInjectionMatchSetInput{} } - output = &waf.CreateXssMatchSetOutput{} + output = &waf.CreateSqlInjectionMatchSetOutput{} req = c.newRequest(op, input, output) return } -// CreateXssMatchSet API operation for AWS WAF Regional. +// CreateSqlInjectionMatchSet API operation for AWS WAF Regional. // -// Creates an XssMatchSet, which you use to allow, block, or count requests -// that contain cross-site scripting attacks in the specified part of web requests. -// AWS WAF searches for character sequences that are likely to be malicious -// strings. +// Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests +// that contain snippets of SQL code in a specified part of web requests. AWS +// WAF searches for character sequences that are likely to be malicious strings. // -// To create and configure an XssMatchSet, perform the following steps: +// To create and configure a SqlInjectionMatchSet, perform the following steps: // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a CreateXssMatchSet request. +// parameter of a CreateSqlInjectionMatchSet request. // -// Submit a CreateXssMatchSet request. +// Submit a CreateSqlInjectionMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateXssMatchSet request. +// parameter of an UpdateSqlInjectionMatchSet request. // -// Submit an UpdateXssMatchSet request to specify the parts of web requests -// in which you want to allow, block, or count cross-site scripting attacks. +// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web +// requests in which you want to allow, block, or count malicious SQL code. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -1280,7 +1351,7 @@ func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation CreateXssMatchSet for usage and error information. +// API operation CreateSqlInjectionMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" @@ -1315,7 +1386,7 @@ func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. 
// // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -1333,96 +1404,116 @@ func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateXssMatchSet -func (c *WAFRegional) CreateXssMatchSet(input *waf.CreateXssMatchSetInput) (*waf.CreateXssMatchSetOutput, error) { - req, out := c.CreateXssMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateSqlInjectionMatchSet +func (c *WAFRegional) CreateSqlInjectionMatchSet(input *waf.CreateSqlInjectionMatchSetInput) (*waf.CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) return out, req.Send() } -// CreateXssMatchSetWithContext is the same as CreateXssMatchSet with the addition of +// CreateSqlInjectionMatchSetWithContext is the same as CreateSqlInjectionMatchSet with the addition of // the ability to pass a context and additional request options. // -// See CreateXssMatchSet for details on how to use this API operation. +// See CreateSqlInjectionMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) CreateXssMatchSetWithContext(ctx aws.Context, input *waf.CreateXssMatchSetInput, opts ...request.Option) (*waf.CreateXssMatchSetOutput, error) { - req, out := c.CreateXssMatchSetRequest(input) +func (c *WAFRegional) CreateSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.CreateSqlInjectionMatchSetInput, opts ...request.Option) (*waf.CreateSqlInjectionMatchSetOutput, error) { + req, out := c.CreateSqlInjectionMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteByteMatchSet = "DeleteByteMatchSet" +const opCreateWebACL = "CreateWebACL" -// DeleteByteMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteByteMatchSet operation. The "output" return +// CreateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebACL operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteByteMatchSet for more information on using the DeleteByteMatchSet +// See CreateWebACL for more information on using the CreateWebACL // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteByteMatchSetRequest method. -// req, resp := client.DeleteByteMatchSetRequest(params) +// // Example sending a request using the CreateWebACLRequest method. 
+// req, resp := client.CreateWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet -func (c *WAFRegional) DeleteByteMatchSetRequest(input *waf.DeleteByteMatchSetInput) (req *request.Request, output *waf.DeleteByteMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL +func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *request.Request, output *waf.CreateWebACLOutput) { op := &request.Operation{ - Name: opDeleteByteMatchSet, + Name: opCreateWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteByteMatchSetInput{} + input = &waf.CreateWebACLInput{} } - output = &waf.DeleteByteMatchSetOutput{} + output = &waf.CreateWebACLOutput{} req = c.newRequest(op, input, output) return } -// DeleteByteMatchSet API operation for AWS WAF Regional. -// -// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's -// still used in any Rules or if it still includes any ByteMatchTuple objects -// (any filters). +// CreateWebACL API operation for AWS WAF Regional. // -// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. +// Creates a WebACL, which contains the Rules that identify the CloudFront web +// requests that you want to allow, block, or count. AWS WAF evaluates Rules +// in order based on the value of Priority for each Rule. // -// To permanently delete a ByteMatchSet, perform the following steps: +// You also specify a default action, either ALLOW or BLOCK. If a web request +// doesn't match any of the Rules in a WebACL, AWS WAF responds to the request +// with the default action. // -// Update the ByteMatchSet to remove filters, if any. For more information, -// see UpdateByteMatchSet. +// To create and configure a WebACL, perform the following steps: +// +// Create and update the ByteMatchSet objects and other predicates that you +// want to include in Rules. For more information, see CreateByteMatchSet, UpdateByteMatchSet, +// CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// +// Create and update the Rules that you want to include in the WebACL. For more +// information, see CreateRule and UpdateRule. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteByteMatchSet request. +// parameter of a CreateWebACL request. // -// Submit a DeleteByteMatchSet request. +// Submit a CreateWebACL request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. +// +// Submit an UpdateWebACL request to specify the Rules that you want to include +// in the WebACL, to specify the default action, and to associate the WebACL +// with a CloudFront distribution. +// +// For more information about how to use the AWS WAF API, see the AWS WAF Developer +// Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteByteMatchSet for usage and error information. +// API operation CreateWebACL for usage and error information. 
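//
// As a hedged illustration of the change-token workflow described above (this
// sketch is not part of the generated documentation; the region, ACL name, and
// metric name are assumptions, and the usual aws, session, waf, and wafregional
// imports plus log/fmt are assumed), a minimal CreateWebACL call could look like:
//
//    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
//    svc := wafregional.New(sess)
//
//    // Every mutating WAF call needs a fresh change token.
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    acl, err := svc.CreateWebACL(&waf.CreateWebACLInput{
//        ChangeToken:   token.ChangeToken,
//        Name:          aws.String("example-web-acl"), // assumed name
//        MetricName:    aws.String("ExampleWebACL"),   // assumed metric name
//        DefaultAction: &waf.WafAction{Type: aws.String("ALLOW")},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(acl.WebACL.WebACLId))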
// // Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -1431,127 +1522,141 @@ func (c *WAFRegional) DeleteByteMatchSetRequest(input *waf.DeleteByteMatchSetInp // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. // -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// * You specified an invalid parameter name. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// * You specified an invalid value. // -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. // -// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" -// The operation failed because you tried to delete an object that isn't empty. -// For example: +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a WebACL that still contains one or more Rule objects. +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. // -// * You tried to delete a Rule that still contains one or more ByteMatchSet -// objects or other predicates. +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple -// objects. +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // -// * You tried to delete an IPSet that references one or more IP addresses. +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet -func (c *WAFRegional) DeleteByteMatchSet(input *waf.DeleteByteMatchSetInput) (*waf.DeleteByteMatchSetOutput, error) { - req, out := c.DeleteByteMatchSetRequest(input) +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. 
For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACL +func (c *WAFRegional) CreateWebACL(input *waf.CreateWebACLInput) (*waf.CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) return out, req.Send() } -// DeleteByteMatchSetWithContext is the same as DeleteByteMatchSet with the addition of +// CreateWebACLWithContext is the same as CreateWebACL with the addition of // the ability to pass a context and additional request options. // -// See DeleteByteMatchSet for details on how to use this API operation. +// See CreateWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteByteMatchSetWithContext(ctx aws.Context, input *waf.DeleteByteMatchSetInput, opts ...request.Option) (*waf.DeleteByteMatchSetOutput, error) { - req, out := c.DeleteByteMatchSetRequest(input) +func (c *WAFRegional) CreateWebACLWithContext(ctx aws.Context, input *waf.CreateWebACLInput, opts ...request.Option) (*waf.CreateWebACLOutput, error) { + req, out := c.CreateWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteIPSet = "DeleteIPSet" +const opCreateXssMatchSet = "CreateXssMatchSet" -// DeleteIPSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteIPSet operation. The "output" return +// CreateXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateXssMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteIPSet for more information on using the DeleteIPSet +// See CreateXssMatchSet for more information on using the CreateXssMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteIPSetRequest method. -// req, resp := client.DeleteIPSetRequest(params) +// // Example sending a request using the CreateXssMatchSetRequest method. 
+// req, resp := client.CreateXssMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteIPSet -func (c *WAFRegional) DeleteIPSetRequest(input *waf.DeleteIPSetInput) (req *request.Request, output *waf.DeleteIPSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateXssMatchSet +func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput) (req *request.Request, output *waf.CreateXssMatchSetOutput) { op := &request.Operation{ - Name: opDeleteIPSet, + Name: opCreateXssMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteIPSetInput{} + input = &waf.CreateXssMatchSetInput{} } - output = &waf.DeleteIPSetOutput{} + output = &waf.CreateXssMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteIPSet API operation for AWS WAF Regional. +// CreateXssMatchSet API operation for AWS WAF Regional. // -// Permanently deletes an IPSet. You can't delete an IPSet if it's still used -// in any Rules or if it still includes any IP addresses. +// Creates an XssMatchSet, which you use to allow, block, or count requests +// that contain cross-site scripting attacks in the specified part of web requests. +// AWS WAF searches for character sequences that are likely to be malicious +// strings. // -// If you just want to remove an IPSet from a Rule, use UpdateRule. +// To create and configure an XssMatchSet, perform the following steps: // -// To permanently delete an IPSet from AWS WAF, perform the following steps: +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateXssMatchSet request. // -// Update the IPSet to remove IP address ranges, if any. For more information, -// see UpdateIPSet. +// Submit a CreateXssMatchSet request. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteIPSet request. +// parameter of an UpdateXssMatchSet request. // -// Submit a DeleteIPSet request. +// Submit an UpdateXssMatchSet request to specify the parts of web requests +// in which you want to allow, block, or count cross-site scripting attacks. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteIPSet for usage and error information. +// API operation CreateXssMatchSet for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. +// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException" +// The name specified is invalid. 
// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request @@ -1561,126 +1666,135 @@ func (c *WAFRegional) DeleteIPSetRequest(input *waf.DeleteIPSetInput) (req *requ // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: // -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// * You specified an invalid parameter name. // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// * You specified an invalid value. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. // -// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" -// The operation failed because you tried to delete an object that isn't empty. -// For example: +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a WebACL that still contains one or more Rule objects. +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. // -// * You tried to delete a Rule that still contains one or more ByteMatchSet -// objects or other predicates. +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple -// objects. +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // -// * You tried to delete an IPSet that references one or more IP addresses. +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteIPSet -func (c *WAFRegional) DeleteIPSet(input *waf.DeleteIPSetInput) (*waf.DeleteIPSetOutput, error) { - req, out := c.DeleteIPSetRequest(input) +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. 
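//
// As a minimal, non-authoritative sketch of the create-and-configure steps
// above (svc is an initialized *wafregional.WAFRegional client as in the
// CreateWebACL sketch, and the set name is an assumption):
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    xss, err := svc.CreateXssMatchSet(&waf.CreateXssMatchSetInput{
//        ChangeToken: token.ChangeToken,
//        Name:        aws.String("example-xss-match-set"), // assumed name
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    // The returned set ID is what UpdateXssMatchSet would target next.
//    fmt.Println(aws.StringValue(xss.XssMatchSet.XssMatchSetId))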
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateXssMatchSet +func (c *WAFRegional) CreateXssMatchSet(input *waf.CreateXssMatchSetInput) (*waf.CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) return out, req.Send() } -// DeleteIPSetWithContext is the same as DeleteIPSet with the addition of +// CreateXssMatchSetWithContext is the same as CreateXssMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteIPSet for details on how to use this API operation. +// See CreateXssMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteIPSetWithContext(ctx aws.Context, input *waf.DeleteIPSetInput, opts ...request.Option) (*waf.DeleteIPSetOutput, error) { - req, out := c.DeleteIPSetRequest(input) +func (c *WAFRegional) CreateXssMatchSetWithContext(ctx aws.Context, input *waf.CreateXssMatchSetInput, opts ...request.Option) (*waf.CreateXssMatchSetOutput, error) { + req, out := c.CreateXssMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteRateBasedRule = "DeleteRateBasedRule" +const opDeleteByteMatchSet = "DeleteByteMatchSet" -// DeleteRateBasedRuleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteRateBasedRule operation. The "output" return +// DeleteByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteByteMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteRateBasedRule for more information on using the DeleteRateBasedRule +// See DeleteByteMatchSet for more information on using the DeleteByteMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteRateBasedRuleRequest method. -// req, resp := client.DeleteRateBasedRuleRequest(params) +// // Example sending a request using the DeleteByteMatchSetRequest method. 
+// req, resp := client.DeleteByteMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRateBasedRule -func (c *WAFRegional) DeleteRateBasedRuleRequest(input *waf.DeleteRateBasedRuleInput) (req *request.Request, output *waf.DeleteRateBasedRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet +func (c *WAFRegional) DeleteByteMatchSetRequest(input *waf.DeleteByteMatchSetInput) (req *request.Request, output *waf.DeleteByteMatchSetOutput) { op := &request.Operation{ - Name: opDeleteRateBasedRule, + Name: opDeleteByteMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteRateBasedRuleInput{} + input = &waf.DeleteByteMatchSetInput{} } - output = &waf.DeleteRateBasedRuleOutput{} + output = &waf.DeleteByteMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteRateBasedRule API operation for AWS WAF Regional. +// DeleteByteMatchSet API operation for AWS WAF Regional. // -// Permanently deletes a RateBasedRule. You can't delete a rule if it's still -// used in any WebACL objects or if it still includes any predicates, such as -// ByteMatchSet objects. +// Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's +// still used in any Rules or if it still includes any ByteMatchTuple objects +// (any filters). // -// If you just want to remove a rule from a WebACL, use UpdateWebACL. +// If you just want to remove a ByteMatchSet from a Rule, use UpdateRule. // -// To permanently delete a RateBasedRule from AWS WAF, perform the following -// steps: +// To permanently delete a ByteMatchSet, perform the following steps: // -// Update the RateBasedRule to remove predicates, if any. For more information, -// see UpdateRateBasedRule. +// Update the ByteMatchSet to remove filters, if any. For more information, +// see UpdateByteMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteRateBasedRule request. +// parameter of a DeleteByteMatchSet request. // -// Submit a DeleteRateBasedRule request. +// Submit a DeleteByteMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteRateBasedRule for usage and error information. +// API operation DeleteByteMatchSet for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -1700,7 +1814,11 @@ func (c *WAFRegional) DeleteRateBasedRuleRequest(input *waf.DeleteRateBasedRuleI // // * You tried to delete a Rule that is still referenced by a WebACL. // -// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. 
+// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" // The operation failed because you tried to delete an object that isn't empty. // For example: // @@ -1714,93 +1832,93 @@ func (c *WAFRegional) DeleteRateBasedRuleRequest(input *waf.DeleteRateBasedRuleI // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRateBasedRule -func (c *WAFRegional) DeleteRateBasedRule(input *waf.DeleteRateBasedRuleInput) (*waf.DeleteRateBasedRuleOutput, error) { - req, out := c.DeleteRateBasedRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet +func (c *WAFRegional) DeleteByteMatchSet(input *waf.DeleteByteMatchSetInput) (*waf.DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) return out, req.Send() } -// DeleteRateBasedRuleWithContext is the same as DeleteRateBasedRule with the addition of +// DeleteByteMatchSetWithContext is the same as DeleteByteMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteRateBasedRule for details on how to use this API operation. +// See DeleteByteMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteRateBasedRuleWithContext(ctx aws.Context, input *waf.DeleteRateBasedRuleInput, opts ...request.Option) (*waf.DeleteRateBasedRuleOutput, error) { - req, out := c.DeleteRateBasedRuleRequest(input) +func (c *WAFRegional) DeleteByteMatchSetWithContext(ctx aws.Context, input *waf.DeleteByteMatchSetInput, opts ...request.Option) (*waf.DeleteByteMatchSetOutput, error) { + req, out := c.DeleteByteMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteRule = "DeleteRule" +const opDeleteGeoMatchSet = "DeleteGeoMatchSet" -// DeleteRuleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteRule operation. The "output" return +// DeleteGeoMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGeoMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteRule for more information on using the DeleteRule +// See DeleteGeoMatchSet for more information on using the DeleteGeoMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteRuleRequest method. -// req, resp := client.DeleteRuleRequest(params) +// // Example sending a request using the DeleteGeoMatchSetRequest method. 
+// req, resp := client.DeleteGeoMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule -func (c *WAFRegional) DeleteRuleRequest(input *waf.DeleteRuleInput) (req *request.Request, output *waf.DeleteRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteGeoMatchSet +func (c *WAFRegional) DeleteGeoMatchSetRequest(input *waf.DeleteGeoMatchSetInput) (req *request.Request, output *waf.DeleteGeoMatchSetOutput) { op := &request.Operation{ - Name: opDeleteRule, + Name: opDeleteGeoMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteRuleInput{} + input = &waf.DeleteGeoMatchSetInput{} } - output = &waf.DeleteRuleOutput{} + output = &waf.DeleteGeoMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteRule API operation for AWS WAF Regional. +// DeleteGeoMatchSet API operation for AWS WAF Regional. // -// Permanently deletes a Rule. You can't delete a Rule if it's still used in -// any WebACL objects or if it still includes any predicates, such as ByteMatchSet -// objects. +// Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's +// still used in any Rules or if it still includes any countries. // -// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// If you just want to remove a GeoMatchSet from a Rule, use UpdateRule. // -// To permanently delete a Rule from AWS WAF, perform the following steps: +// To permanently delete a GeoMatchSet from AWS WAF, perform the following steps: // -// Update the Rule to remove predicates, if any. For more information, see UpdateRule. +// Update the GeoMatchSet to remove any countries. For more information, see +// UpdateGeoMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteRule request. +// parameter of a DeleteGeoMatchSet request. // -// Submit a DeleteRule request. +// Submit a DeleteGeoMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteRule for usage and error information. +// API operation DeleteGeoMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFStaleDataException "WAFStaleDataException" @@ -1840,94 +1958,93 @@ func (c *WAFRegional) DeleteRuleRequest(input *waf.DeleteRuleInput) (req *reques // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule -func (c *WAFRegional) DeleteRule(input *waf.DeleteRuleInput) (*waf.DeleteRuleOutput, error) { - req, out := c.DeleteRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteGeoMatchSet +func (c *WAFRegional) DeleteGeoMatchSet(input *waf.DeleteGeoMatchSetInput) (*waf.DeleteGeoMatchSetOutput, error) { + req, out := c.DeleteGeoMatchSetRequest(input) return out, req.Send() } -// DeleteRuleWithContext is the same as DeleteRule with the addition of +// DeleteGeoMatchSetWithContext is the same as DeleteGeoMatchSet with the addition of // the ability to pass a context and additional request options. 
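//
// For instance, a hedged sketch of the deletion steps with a cancellable
// context (the set ID is a placeholder, svc is an initialized
// *wafregional.WAFRegional client, and context/time imports are assumed):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//
//    token, err := svc.GetChangeTokenWithContext(ctx, &waf.GetChangeTokenInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    _, err = svc.DeleteGeoMatchSetWithContext(ctx, &waf.DeleteGeoMatchSetInput{
//        ChangeToken:   token.ChangeToken,
//        GeoMatchSetId: aws.String("example-geo-match-set-id"), // placeholder
//    })
//    if err != nil {
//        log.Fatal(err)
//    }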
// -// See DeleteRule for details on how to use this API operation. +// See DeleteGeoMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteRuleWithContext(ctx aws.Context, input *waf.DeleteRuleInput, opts ...request.Option) (*waf.DeleteRuleOutput, error) { - req, out := c.DeleteRuleRequest(input) +func (c *WAFRegional) DeleteGeoMatchSetWithContext(ctx aws.Context, input *waf.DeleteGeoMatchSetInput, opts ...request.Option) (*waf.DeleteGeoMatchSetOutput, error) { + req, out := c.DeleteGeoMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" +const opDeleteIPSet = "DeleteIPSet" -// DeleteSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSizeConstraintSet operation. The "output" return +// DeleteIPSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIPSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSizeConstraintSet for more information on using the DeleteSizeConstraintSet +// See DeleteIPSet for more information on using the DeleteIPSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSizeConstraintSetRequest method. -// req, resp := client.DeleteSizeConstraintSetRequest(params) +// // Example sending a request using the DeleteIPSetRequest method. +// req, resp := client.DeleteIPSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSizeConstraintSet -func (c *WAFRegional) DeleteSizeConstraintSetRequest(input *waf.DeleteSizeConstraintSetInput) (req *request.Request, output *waf.DeleteSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteIPSet +func (c *WAFRegional) DeleteIPSetRequest(input *waf.DeleteIPSetInput) (req *request.Request, output *waf.DeleteIPSetOutput) { op := &request.Operation{ - Name: opDeleteSizeConstraintSet, + Name: opDeleteIPSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteSizeConstraintSetInput{} + input = &waf.DeleteIPSetInput{} } - output = &waf.DeleteSizeConstraintSetOutput{} + output = &waf.DeleteIPSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteSizeConstraintSet API operation for AWS WAF Regional. +// DeleteIPSet API operation for AWS WAF Regional. // -// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet -// if it's still used in any Rules or if it still includes any SizeConstraint -// objects (any filters). +// Permanently deletes an IPSet. 
You can't delete an IPSet if it's still used +// in any Rules or if it still includes any IP addresses. // -// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. +// If you just want to remove an IPSet from a Rule, use UpdateRule. // -// To permanently delete a SizeConstraintSet, perform the following steps: +// To permanently delete an IPSet from AWS WAF, perform the following steps: // -// Update the SizeConstraintSet to remove filters, if any. For more information, -// see UpdateSizeConstraintSet. +// Update the IPSet to remove IP address ranges, if any. For more information, +// see UpdateIPSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteSizeConstraintSet request. +// parameter of a DeleteIPSet request. // -// Submit a DeleteSizeConstraintSet request. +// Submit a DeleteIPSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteSizeConstraintSet for usage and error information. +// API operation DeleteIPSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFStaleDataException "WAFStaleDataException" @@ -1967,97 +2084,101 @@ func (c *WAFRegional) DeleteSizeConstraintSetRequest(input *waf.DeleteSizeConstr // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSizeConstraintSet -func (c *WAFRegional) DeleteSizeConstraintSet(input *waf.DeleteSizeConstraintSetInput) (*waf.DeleteSizeConstraintSetOutput, error) { - req, out := c.DeleteSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteIPSet +func (c *WAFRegional) DeleteIPSet(input *waf.DeleteIPSetInput) (*waf.DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) return out, req.Send() } -// DeleteSizeConstraintSetWithContext is the same as DeleteSizeConstraintSet with the addition of +// DeleteIPSetWithContext is the same as DeleteIPSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteSizeConstraintSet for details on how to use this API operation. +// See DeleteIPSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteSizeConstraintSetWithContext(ctx aws.Context, input *waf.DeleteSizeConstraintSetInput, opts ...request.Option) (*waf.DeleteSizeConstraintSetOutput, error) { - req, out := c.DeleteSizeConstraintSetRequest(input) +func (c *WAFRegional) DeleteIPSetWithContext(ctx aws.Context, input *waf.DeleteIPSetInput, opts ...request.Option) (*waf.DeleteIPSetOutput, error) { + req, out := c.DeleteIPSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" +const opDeleteRateBasedRule = "DeleteRateBasedRule" -// DeleteSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSqlInjectionMatchSet operation. 
The "output" return +// DeleteRateBasedRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRateBasedRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSqlInjectionMatchSet for more information on using the DeleteSqlInjectionMatchSet +// See DeleteRateBasedRule for more information on using the DeleteRateBasedRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSqlInjectionMatchSetRequest method. -// req, resp := client.DeleteSqlInjectionMatchSetRequest(params) +// // Example sending a request using the DeleteRateBasedRuleRequest method. +// req, resp := client.DeleteRateBasedRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSqlInjectionMatchSet -func (c *WAFRegional) DeleteSqlInjectionMatchSetRequest(input *waf.DeleteSqlInjectionMatchSetInput) (req *request.Request, output *waf.DeleteSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRateBasedRule +func (c *WAFRegional) DeleteRateBasedRuleRequest(input *waf.DeleteRateBasedRuleInput) (req *request.Request, output *waf.DeleteRateBasedRuleOutput) { op := &request.Operation{ - Name: opDeleteSqlInjectionMatchSet, + Name: opDeleteRateBasedRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteSqlInjectionMatchSetInput{} + input = &waf.DeleteRateBasedRuleInput{} } - output = &waf.DeleteSqlInjectionMatchSetOutput{} + output = &waf.DeleteRateBasedRuleOutput{} req = c.newRequest(op, input, output) return } -// DeleteSqlInjectionMatchSet API operation for AWS WAF Regional. +// DeleteRateBasedRule API operation for AWS WAF Regional. // -// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet -// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple -// objects. +// Permanently deletes a RateBasedRule. You can't delete a rule if it's still +// used in any WebACL objects or if it still includes any predicates, such as +// ByteMatchSet objects. // -// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. +// If you just want to remove a rule from a WebACL, use UpdateWebACL. // -// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// To permanently delete a RateBasedRule from AWS WAF, perform the following // steps: // -// Update the SqlInjectionMatchSet to remove filters, if any. For more information, -// see UpdateSqlInjectionMatchSet. +// Update the RateBasedRule to remove predicates, if any. For more information, +// see UpdateRateBasedRule. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteSqlInjectionMatchSet request. +// parameter of a DeleteRateBasedRule request. // -// Submit a DeleteSqlInjectionMatchSet request. +// Submit a DeleteRateBasedRule request. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteSqlInjectionMatchSet for usage and error information. +// API operation DeleteRateBasedRule for usage and error information. // // Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2077,10 +2198,6 @@ func (c *WAFRegional) DeleteSqlInjectionMatchSetRequest(input *waf.DeleteSqlInje // // * You tried to delete a Rule that is still referenced by a WebACL. // -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" // The operation failed because you tried to delete an object that isn't empty. // For example: @@ -2095,96 +2212,96 @@ func (c *WAFRegional) DeleteSqlInjectionMatchSetRequest(input *waf.DeleteSqlInje // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSqlInjectionMatchSet -func (c *WAFRegional) DeleteSqlInjectionMatchSet(input *waf.DeleteSqlInjectionMatchSetInput) (*waf.DeleteSqlInjectionMatchSetOutput, error) { - req, out := c.DeleteSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRateBasedRule +func (c *WAFRegional) DeleteRateBasedRule(input *waf.DeleteRateBasedRuleInput) (*waf.DeleteRateBasedRuleOutput, error) { + req, out := c.DeleteRateBasedRuleRequest(input) return out, req.Send() } -// DeleteSqlInjectionMatchSetWithContext is the same as DeleteSqlInjectionMatchSet with the addition of +// DeleteRateBasedRuleWithContext is the same as DeleteRateBasedRule with the addition of // the ability to pass a context and additional request options. // -// See DeleteSqlInjectionMatchSet for details on how to use this API operation. +// See DeleteRateBasedRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.DeleteSqlInjectionMatchSetInput, opts ...request.Option) (*waf.DeleteSqlInjectionMatchSetOutput, error) { - req, out := c.DeleteSqlInjectionMatchSetRequest(input) +func (c *WAFRegional) DeleteRateBasedRuleWithContext(ctx aws.Context, input *waf.DeleteRateBasedRuleInput, opts ...request.Option) (*waf.DeleteRateBasedRuleOutput, error) { + req, out := c.DeleteRateBasedRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteWebACL = "DeleteWebACL" +const opDeleteRegexMatchSet = "DeleteRegexMatchSet" -// DeleteWebACLRequest generates a "aws/request.Request" representing the -// client's request for the DeleteWebACL operation. 
The "output" return +// DeleteRegexMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRegexMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteWebACL for more information on using the DeleteWebACL +// See DeleteRegexMatchSet for more information on using the DeleteRegexMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteWebACLRequest method. -// req, resp := client.DeleteWebACLRequest(params) +// // Example sending a request using the DeleteRegexMatchSetRequest method. +// req, resp := client.DeleteRegexMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL -func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *request.Request, output *waf.DeleteWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRegexMatchSet +func (c *WAFRegional) DeleteRegexMatchSetRequest(input *waf.DeleteRegexMatchSetInput) (req *request.Request, output *waf.DeleteRegexMatchSetOutput) { op := &request.Operation{ - Name: opDeleteWebACL, + Name: opDeleteRegexMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteWebACLInput{} + input = &waf.DeleteRegexMatchSetInput{} } - output = &waf.DeleteWebACLOutput{} + output = &waf.DeleteRegexMatchSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteWebACL API operation for AWS WAF Regional. +// DeleteRegexMatchSet API operation for AWS WAF Regional. // -// Permanently deletes a WebACL. You can't delete a WebACL if it still contains -// any Rules. +// Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if +// it's still used in any Rules or if it still includes any RegexMatchTuples +// objects (any filters). // -// To delete a WebACL, perform the following steps: +// If you just want to remove a RegexMatchSet from a Rule, use UpdateRule. // -// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. +// To permanently delete a RegexMatchSet, perform the following steps: +// +// Update the RegexMatchSet to remove filters, if any. For more information, +// see UpdateRegexMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteWebACL request. +// parameter of a DeleteRegexMatchSet request. // -// Submit a DeleteWebACL request. +// Submit a DeleteRegexMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteWebACL for usage and error information. +// API operation DeleteRegexMatchSet for usage and error information. 
// // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2204,6 +2321,10 @@ func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *re // // * You tried to delete a Rule that is still referenced by a WebACL. // +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" // The operation failed because you tried to delete an object that isn't empty. // For example: @@ -2218,94 +2339,82 @@ func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *re // // * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL -func (c *WAFRegional) DeleteWebACL(input *waf.DeleteWebACLInput) (*waf.DeleteWebACLOutput, error) { - req, out := c.DeleteWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRegexMatchSet +func (c *WAFRegional) DeleteRegexMatchSet(input *waf.DeleteRegexMatchSetInput) (*waf.DeleteRegexMatchSetOutput, error) { + req, out := c.DeleteRegexMatchSetRequest(input) return out, req.Send() } -// DeleteWebACLWithContext is the same as DeleteWebACL with the addition of +// DeleteRegexMatchSetWithContext is the same as DeleteRegexMatchSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteWebACL for details on how to use this API operation. +// See DeleteRegexMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteWebACLWithContext(ctx aws.Context, input *waf.DeleteWebACLInput, opts ...request.Option) (*waf.DeleteWebACLOutput, error) { - req, out := c.DeleteWebACLRequest(input) +func (c *WAFRegional) DeleteRegexMatchSetWithContext(ctx aws.Context, input *waf.DeleteRegexMatchSetInput, opts ...request.Option) (*waf.DeleteRegexMatchSetOutput, error) { + req, out := c.DeleteRegexMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteXssMatchSet = "DeleteXssMatchSet" +const opDeleteRegexPatternSet = "DeleteRegexPatternSet" -// DeleteXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteXssMatchSet operation. The "output" return +// DeleteRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRegexPatternSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DeleteXssMatchSet for more information on using the DeleteXssMatchSet +// See DeleteRegexPatternSet for more information on using the DeleteRegexPatternSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteXssMatchSetRequest method. -// req, resp := client.DeleteXssMatchSetRequest(params) +// // Example sending a request using the DeleteRegexPatternSetRequest method. +// req, resp := client.DeleteRegexPatternSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteXssMatchSet -func (c *WAFRegional) DeleteXssMatchSetRequest(input *waf.DeleteXssMatchSetInput) (req *request.Request, output *waf.DeleteXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRegexPatternSet +func (c *WAFRegional) DeleteRegexPatternSetRequest(input *waf.DeleteRegexPatternSetInput) (req *request.Request, output *waf.DeleteRegexPatternSetOutput) { op := &request.Operation{ - Name: opDeleteXssMatchSet, + Name: opDeleteRegexPatternSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.DeleteXssMatchSetInput{} + input = &waf.DeleteRegexPatternSetInput{} } - output = &waf.DeleteXssMatchSetOutput{} + output = &waf.DeleteRegexPatternSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteXssMatchSet API operation for AWS WAF Regional. -// -// Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's -// still used in any Rules or if it still contains any XssMatchTuple objects. -// -// If you just want to remove an XssMatchSet from a Rule, use UpdateRule. -// -// To permanently delete an XssMatchSet from AWS WAF, perform the following -// steps: -// -// Update the XssMatchSet to remove filters, if any. For more information, see -// UpdateXssMatchSet. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of a DeleteXssMatchSet request. +// DeleteRegexPatternSet API operation for AWS WAF Regional. // -// Submit a DeleteXssMatchSet request. +// Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet +// if it's still used in any RegexMatchSet or if the RegexPatternSet is not +// empty. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DeleteXssMatchSet for usage and error information. +// API operation DeleteRegexPatternSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -2345,82 +2454,99 @@ func (c *WAFRegional) DeleteXssMatchSetRequest(input *waf.DeleteXssMatchSetInput // // * You tried to delete an IPSet that references one or more IP addresses. 
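//
// A hedged sketch of deleting an empty, unreferenced RegexPatternSet (the
// pattern set ID is a placeholder and svc is an initialized
// *wafregional.WAFRegional client):
//
//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil {
//        log.Fatal(err)
//    }
//    _, err = svc.DeleteRegexPatternSet(&waf.DeleteRegexPatternSetInput{
//        ChangeToken:       token.ChangeToken,
//        RegexPatternSetId: aws.String("example-regex-pattern-set-id"), // placeholder
//    })
//    if err != nil {
//        log.Fatal(err)
//    }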
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteXssMatchSet -func (c *WAFRegional) DeleteXssMatchSet(input *waf.DeleteXssMatchSetInput) (*waf.DeleteXssMatchSetOutput, error) { - req, out := c.DeleteXssMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRegexPatternSet +func (c *WAFRegional) DeleteRegexPatternSet(input *waf.DeleteRegexPatternSetInput) (*waf.DeleteRegexPatternSetOutput, error) { + req, out := c.DeleteRegexPatternSetRequest(input) return out, req.Send() } -// DeleteXssMatchSetWithContext is the same as DeleteXssMatchSet with the addition of +// DeleteRegexPatternSetWithContext is the same as DeleteRegexPatternSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteXssMatchSet for details on how to use this API operation. +// See DeleteRegexPatternSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DeleteXssMatchSetWithContext(ctx aws.Context, input *waf.DeleteXssMatchSetInput, opts ...request.Option) (*waf.DeleteXssMatchSetOutput, error) { - req, out := c.DeleteXssMatchSetRequest(input) +func (c *WAFRegional) DeleteRegexPatternSetWithContext(ctx aws.Context, input *waf.DeleteRegexPatternSetInput, opts ...request.Option) (*waf.DeleteRegexPatternSetOutput, error) { + req, out := c.DeleteRegexPatternSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDisassociateWebACL = "DisassociateWebACL" +const opDeleteRule = "DeleteRule" -// DisassociateWebACLRequest generates a "aws/request.Request" representing the -// client's request for the DisassociateWebACL operation. The "output" return +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisassociateWebACL for more information on using the DisassociateWebACL +// See DeleteRule for more information on using the DeleteRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisassociateWebACLRequest method. -// req, resp := client.DisassociateWebACLRequest(params) +// // Example sending a request using the DeleteRuleRequest method. 
+// req, resp := client.DeleteRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACL -func (c *WAFRegional) DisassociateWebACLRequest(input *DisassociateWebACLInput) (req *request.Request, output *DisassociateWebACLOutput) { - op := &request.Operation{ - Name: opDisassociateWebACL, +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule +func (c *WAFRegional) DeleteRuleRequest(input *waf.DeleteRuleInput) (req *request.Request, output *waf.DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DisassociateWebACLInput{} + input = &waf.DeleteRuleInput{} } - output = &DisassociateWebACLOutput{} + output = &waf.DeleteRuleOutput{} req = c.newRequest(op, input, output) return } -// DisassociateWebACL API operation for AWS WAF Regional. +// DeleteRule API operation for AWS WAF Regional. // -// Removes a web ACL from the specified resource. +// Permanently deletes a Rule. You can't delete a Rule if it's still used in +// any WebACL objects or if it still includes any predicates, such as ByteMatchSet +// objects. +// +// If you just want to remove a Rule from a WebACL, use UpdateWebACL. +// +// To permanently delete a Rule from AWS WAF, perform the following steps: +// +// Update the Rule to remove predicates, if any. For more information, see UpdateRule. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteRule request. +// +// Submit a DeleteRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation DisassociateWebACL for usage and error information. +// API operation DeleteRule for usage and error information. // // Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2429,114 +2555,125 @@ func (c *WAFRegional) DisassociateWebACLRequest(input *DisassociateWebACLInput) // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. // -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. 
+// * You tried to delete a ByteMatchSet that is still referenced by a Rule. // -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. +// * You tried to delete a Rule that is still referenced by a WebACL. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// * You tried to delete a WebACL that still contains one or more Rule objects. // -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// * You tried to delete an IPSet that references one or more IP addresses. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACL -func (c *WAFRegional) DisassociateWebACL(input *DisassociateWebACLInput) (*DisassociateWebACLOutput, error) { - req, out := c.DisassociateWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteRule +func (c *WAFRegional) DeleteRule(input *waf.DeleteRuleInput) (*waf.DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) return out, req.Send() } -// DisassociateWebACLWithContext is the same as DisassociateWebACL with the addition of +// DeleteRuleWithContext is the same as DeleteRule with the addition of // the ability to pass a context and additional request options. // -// See DisassociateWebACL for details on how to use this API operation. +// See DeleteRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) DisassociateWebACLWithContext(ctx aws.Context, input *DisassociateWebACLInput, opts ...request.Option) (*DisassociateWebACLOutput, error) { - req, out := c.DisassociateWebACLRequest(input) +func (c *WAFRegional) DeleteRuleWithContext(ctx aws.Context, input *waf.DeleteRuleInput, opts ...request.Option) (*waf.DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetByteMatchSet = "GetByteMatchSet" +const opDeleteSizeConstraintSet = "DeleteSizeConstraintSet" -// GetByteMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the GetByteMatchSet operation. The "output" return +// DeleteSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSizeConstraintSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetByteMatchSet for more information on using the GetByteMatchSet +// See DeleteSizeConstraintSet for more information on using the DeleteSizeConstraintSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetByteMatchSetRequest method. -// req, resp := client.GetByteMatchSetRequest(params) +// // Example sending a request using the DeleteSizeConstraintSetRequest method. +// req, resp := client.DeleteSizeConstraintSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetByteMatchSet -func (c *WAFRegional) GetByteMatchSetRequest(input *waf.GetByteMatchSetInput) (req *request.Request, output *waf.GetByteMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSizeConstraintSet +func (c *WAFRegional) DeleteSizeConstraintSetRequest(input *waf.DeleteSizeConstraintSetInput) (req *request.Request, output *waf.DeleteSizeConstraintSetOutput) { op := &request.Operation{ - Name: opGetByteMatchSet, + Name: opDeleteSizeConstraintSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetByteMatchSetInput{} + input = &waf.DeleteSizeConstraintSetInput{} } - output = &waf.GetByteMatchSetOutput{} + output = &waf.DeleteSizeConstraintSetOutput{} req = c.newRequest(op, input, output) return } -// GetByteMatchSet API operation for AWS WAF Regional. +// DeleteSizeConstraintSet API operation for AWS WAF Regional. // -// Returns the ByteMatchSet specified by ByteMatchSetId. +// Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet +// if it's still used in any Rules or if it still includes any SizeConstraint +// objects (any filters). +// +// If you just want to remove a SizeConstraintSet from a Rule, use UpdateRule. +// +// To permanently delete a SizeConstraintSet, perform the following steps: +// +// Update the SizeConstraintSet to remove filters, if any. For more information, +// see UpdateSizeConstraintSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSizeConstraintSet request. +// +// Submit a DeleteSizeConstraintSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetByteMatchSet for usage and error information. +// API operation DeleteSizeConstraintSet for usage and error information. // // Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. 
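The *Request form documented above exists so callers can customize the request before sending it, e.g. to add headers or retry logic. A hedged sketch of that two-step pattern for DeleteSizeConstraintSet, reusing the imports and client construction from the earlier sketch; the header name is illustrative, not an AWS requirement:

func deleteSizeConstraintSet(client *wafregional.WAFRegional, changeToken, setID string) error {
	// Build the request without sending it; out is not valid until Send returns without error.
	req, out := client.DeleteSizeConstraintSetRequest(&waf.DeleteSizeConstraintSetInput{
		ChangeToken:         aws.String(changeToken),
		SizeConstraintSetId: aws.String(setID),
	})

	// Inject custom logic into the request lifecycle before the call goes out.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "delete-size-constraint-set") // illustrative header

	if err := req.Send(); err != nil {
		return err
	}
	fmt.Println(out) // resp is now filled
	return nil
}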
@@ -2548,269 +2685,246 @@ func (c *WAFRegional) GetByteMatchSetRequest(input *waf.GetByteMatchSetInput) (r // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetByteMatchSet -func (c *WAFRegional) GetByteMatchSet(input *waf.GetByteMatchSetInput) (*waf.GetByteMatchSetOutput, error) { - req, out := c.GetByteMatchSetRequest(input) +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSizeConstraintSet +func (c *WAFRegional) DeleteSizeConstraintSet(input *waf.DeleteSizeConstraintSetInput) (*waf.DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) return out, req.Send() } -// GetByteMatchSetWithContext is the same as GetByteMatchSet with the addition of +// DeleteSizeConstraintSetWithContext is the same as DeleteSizeConstraintSet with the addition of // the ability to pass a context and additional request options. // -// See GetByteMatchSet for details on how to use this API operation. +// See DeleteSizeConstraintSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetByteMatchSetWithContext(ctx aws.Context, input *waf.GetByteMatchSetInput, opts ...request.Option) (*waf.GetByteMatchSetOutput, error) { - req, out := c.GetByteMatchSetRequest(input) +func (c *WAFRegional) DeleteSizeConstraintSetWithContext(ctx aws.Context, input *waf.DeleteSizeConstraintSetInput, opts ...request.Option) (*waf.DeleteSizeConstraintSetOutput, error) { + req, out := c.DeleteSizeConstraintSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetChangeToken = "GetChangeToken" +const opDeleteSqlInjectionMatchSet = "DeleteSqlInjectionMatchSet" -// GetChangeTokenRequest generates a "aws/request.Request" representing the -// client's request for the GetChangeToken operation. The "output" return +// DeleteSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSqlInjectionMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See GetChangeToken for more information on using the GetChangeToken +// See DeleteSqlInjectionMatchSet for more information on using the DeleteSqlInjectionMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetChangeTokenRequest method. -// req, resp := client.GetChangeTokenRequest(params) +// // Example sending a request using the DeleteSqlInjectionMatchSetRequest method. +// req, resp := client.DeleteSqlInjectionMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeToken -func (c *WAFRegional) GetChangeTokenRequest(input *waf.GetChangeTokenInput) (req *request.Request, output *waf.GetChangeTokenOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSqlInjectionMatchSet +func (c *WAFRegional) DeleteSqlInjectionMatchSetRequest(input *waf.DeleteSqlInjectionMatchSetInput) (req *request.Request, output *waf.DeleteSqlInjectionMatchSetOutput) { op := &request.Operation{ - Name: opGetChangeToken, + Name: opDeleteSqlInjectionMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetChangeTokenInput{} + input = &waf.DeleteSqlInjectionMatchSetInput{} } - output = &waf.GetChangeTokenOutput{} + output = &waf.DeleteSqlInjectionMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetChangeToken API operation for AWS WAF Regional. +// DeleteSqlInjectionMatchSet API operation for AWS WAF Regional. // -// When you want to create, update, or delete AWS WAF objects, get a change -// token and include the change token in the create, update, or delete request. -// Change tokens ensure that your application doesn't submit conflicting requests -// to AWS WAF. +// Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet +// if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple +// objects. // -// Each create, update, or delete request must use a unique change token. If -// your application submits a GetChangeToken request and then submits a second -// GetChangeToken request before submitting a create, update, or delete request, -// the second GetChangeToken request returns the same value as the first GetChangeToken -// request. +// If you just want to remove a SqlInjectionMatchSet from a Rule, use UpdateRule. // -// When you use a change token in a create, update, or delete request, the status -// of the change token changes to PENDING, which indicates that AWS WAF is propagating -// the change to all AWS WAF servers. Use GetChangeTokenStatus to determine -// the status of your change token. +// To permanently delete a SqlInjectionMatchSet from AWS WAF, perform the following +// steps: +// +// Update the SqlInjectionMatchSet to remove filters, if any. For more information, +// see UpdateSqlInjectionMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSqlInjectionMatchSet request. +// +// Submit a DeleteSqlInjectionMatchSet request. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetChangeToken for usage and error information. +// API operation DeleteSqlInjectionMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeToken -func (c *WAFRegional) GetChangeToken(input *waf.GetChangeTokenInput) (*waf.GetChangeTokenOutput, error) { - req, out := c.GetChangeTokenRequest(input) - return out, req.Send() -} - -// GetChangeTokenWithContext is the same as GetChangeToken with the addition of -// the ability to pass a context and additional request options. +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// See GetChangeToken for details on how to use this API operation. +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WAFRegional) GetChangeTokenWithContext(ctx aws.Context, input *waf.GetChangeTokenInput, opts ...request.Option) (*waf.GetChangeTokenOutput, error) { - req, out := c.GetChangeTokenRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetChangeTokenStatus = "GetChangeTokenStatus" - -// GetChangeTokenStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetChangeTokenStatus operation. The "output" return -// value will be populated with the request's response once the request complets -// successfuly. +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: // -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. // -// See GetChangeTokenStatus for more information on using the GetChangeTokenStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetChangeTokenStatusRequest method. 
-// req, resp := client.GetChangeTokenStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeTokenStatus -func (c *WAFRegional) GetChangeTokenStatusRequest(input *waf.GetChangeTokenStatusInput) (req *request.Request, output *waf.GetChangeTokenStatusOutput) { - op := &request.Operation{ - Name: opGetChangeTokenStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &waf.GetChangeTokenStatusInput{} - } - - output = &waf.GetChangeTokenStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetChangeTokenStatus API operation for AWS WAF Regional. -// -// Returns the status of a ChangeToken that you got by calling GetChangeToken. -// ChangeTokenStatus is one of the following values: -// -// * PROVISIONED: You requested the change token by calling GetChangeToken, -// but you haven't used it yet in a call to create, update, or delete an -// AWS WAF object. +// * You tried to delete a Rule that is still referenced by a WebACL. // -// * PENDING: AWS WAF is propagating the create, update, or delete request -// to all AWS WAF servers. +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. // -// * IN_SYNC: Propagation is complete. +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// * You tried to delete a WebACL that still contains one or more Rule objects. // -// See the AWS API reference guide for AWS WAF Regional's -// API operation GetChangeTokenStatus for usage and error information. +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. // -// Returned Error Codes: -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. // -// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" -// The operation failed because of a system problem, even though the request -// was valid. Retry your request. +// * You tried to delete an IPSet that references one or more IP addresses. 
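The error codes enumerated above surface as awserr.Error values, so callers can branch on Code(). A sketch of handling the common cases for DeleteSqlInjectionMatchSet, with the same imports plus github.com/aws/aws-sdk-go/aws/awserr; treating a missing set as already deleted is an assumption of this sketch, not SDK behavior:

func deleteSQLInjectionMatchSet(client *wafregional.WAFRegional, setID string) error {
	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.DeleteSqlInjectionMatchSet(&waf.DeleteSqlInjectionMatchSetInput{
		ChangeToken:            token.ChangeToken,
		SqlInjectionMatchSetId: aws.String(setID),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case wafregional.ErrCodeWAFNonexistentItemException:
			return nil // already gone; caller treats this as success (assumption)
		case wafregional.ErrCodeWAFStaleDataException:
			return fmt.Errorf("change token already used; fetch a new one and retry: %s", aerr.Message())
		}
	}
	return err
}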
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeTokenStatus -func (c *WAFRegional) GetChangeTokenStatus(input *waf.GetChangeTokenStatusInput) (*waf.GetChangeTokenStatusOutput, error) { - req, out := c.GetChangeTokenStatusRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteSqlInjectionMatchSet +func (c *WAFRegional) DeleteSqlInjectionMatchSet(input *waf.DeleteSqlInjectionMatchSetInput) (*waf.DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) return out, req.Send() } -// GetChangeTokenStatusWithContext is the same as GetChangeTokenStatus with the addition of +// DeleteSqlInjectionMatchSetWithContext is the same as DeleteSqlInjectionMatchSet with the addition of // the ability to pass a context and additional request options. // -// See GetChangeTokenStatus for details on how to use this API operation. +// See DeleteSqlInjectionMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetChangeTokenStatusWithContext(ctx aws.Context, input *waf.GetChangeTokenStatusInput, opts ...request.Option) (*waf.GetChangeTokenStatusOutput, error) { - req, out := c.GetChangeTokenStatusRequest(input) +func (c *WAFRegional) DeleteSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.DeleteSqlInjectionMatchSetInput, opts ...request.Option) (*waf.DeleteSqlInjectionMatchSetOutput, error) { + req, out := c.DeleteSqlInjectionMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetIPSet = "GetIPSet" +const opDeleteWebACL = "DeleteWebACL" -// GetIPSetRequest generates a "aws/request.Request" representing the -// client's request for the GetIPSet operation. The "output" return +// DeleteWebACLRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWebACL operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetIPSet for more information on using the GetIPSet +// See DeleteWebACL for more information on using the DeleteWebACL // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetIPSetRequest method. -// req, resp := client.GetIPSetRequest(params) +// // Example sending a request using the DeleteWebACLRequest method. 
+// req, resp := client.DeleteWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetIPSet -func (c *WAFRegional) GetIPSetRequest(input *waf.GetIPSetInput) (req *request.Request, output *waf.GetIPSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL +func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *request.Request, output *waf.DeleteWebACLOutput) { op := &request.Operation{ - Name: opGetIPSet, + Name: opDeleteWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetIPSetInput{} + input = &waf.DeleteWebACLInput{} } - output = &waf.GetIPSetOutput{} + output = &waf.DeleteWebACLOutput{} req = c.newRequest(op, input, output) return } -// GetIPSet API operation for AWS WAF Regional. +// DeleteWebACL API operation for AWS WAF Regional. // -// Returns the IPSet that is specified by IPSetId. +// Permanently deletes a WebACL. You can't delete a WebACL if it still contains +// any Rules. +// +// To delete a WebACL, perform the following steps: +// +// Update the WebACL to remove Rules, if any. For more information, see UpdateWebACL. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. +// +// Submit a DeleteWebACL request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetIPSet for usage and error information. +// API operation DeleteWebACL for usage and error information. // // Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -2822,81 +2936,116 @@ func (c *WAFRegional) GetIPSetRequest(input *waf.GetIPSetInput) (req *request.Re // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetIPSet -func (c *WAFRegional) GetIPSet(input *waf.GetIPSetInput) (*waf.GetIPSetOutput, error) { - req, out := c.GetIPSetRequest(input) +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. +// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. 
+// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteWebACL +func (c *WAFRegional) DeleteWebACL(input *waf.DeleteWebACLInput) (*waf.DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) return out, req.Send() } -// GetIPSetWithContext is the same as GetIPSet with the addition of +// DeleteWebACLWithContext is the same as DeleteWebACL with the addition of // the ability to pass a context and additional request options. // -// See GetIPSet for details on how to use this API operation. +// See DeleteWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetIPSetWithContext(ctx aws.Context, input *waf.GetIPSetInput, opts ...request.Option) (*waf.GetIPSetOutput, error) { - req, out := c.GetIPSetRequest(input) +func (c *WAFRegional) DeleteWebACLWithContext(ctx aws.Context, input *waf.DeleteWebACLInput, opts ...request.Option) (*waf.DeleteWebACLOutput, error) { + req, out := c.DeleteWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRateBasedRule = "GetRateBasedRule" +const opDeleteXssMatchSet = "DeleteXssMatchSet" -// GetRateBasedRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetRateBasedRule operation. The "output" return +// DeleteXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteXssMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRateBasedRule for more information on using the GetRateBasedRule +// See DeleteXssMatchSet for more information on using the DeleteXssMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRateBasedRuleRequest method. -// req, resp := client.GetRateBasedRuleRequest(params) +// // Example sending a request using the DeleteXssMatchSetRequest method. 
+// req, resp := client.DeleteXssMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRule -func (c *WAFRegional) GetRateBasedRuleRequest(input *waf.GetRateBasedRuleInput) (req *request.Request, output *waf.GetRateBasedRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteXssMatchSet +func (c *WAFRegional) DeleteXssMatchSetRequest(input *waf.DeleteXssMatchSetInput) (req *request.Request, output *waf.DeleteXssMatchSetOutput) { op := &request.Operation{ - Name: opGetRateBasedRule, + Name: opDeleteXssMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetRateBasedRuleInput{} + input = &waf.DeleteXssMatchSetInput{} } - output = &waf.GetRateBasedRuleOutput{} + output = &waf.DeleteXssMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetRateBasedRule API operation for AWS WAF Regional. +// DeleteXssMatchSet API operation for AWS WAF Regional. // -// Returns the RateBasedRule that is specified by the RuleId that you included -// in the GetRateBasedRule request. +// Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's +// still used in any Rules or if it still contains any XssMatchTuple objects. +// +// If you just want to remove an XssMatchSet from a Rule, use UpdateRule. +// +// To permanently delete an XssMatchSet from AWS WAF, perform the following +// steps: +// +// Update the XssMatchSet to remove filters, if any. For more information, see +// UpdateXssMatchSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteXssMatchSet request. +// +// Submit a DeleteXssMatchSet request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetRateBasedRule for usage and error information. +// API operation DeleteXssMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -2910,83 +3059,106 @@ func (c *WAFRegional) GetRateBasedRuleRequest(input *waf.GetRateBasedRuleInput) // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRule -func (c *WAFRegional) GetRateBasedRule(input *waf.GetRateBasedRuleInput) (*waf.GetRateBasedRuleOutput, error) { - req, out := c.GetRateBasedRuleRequest(input) +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFNonEmptyEntityException "WAFNonEmptyEntityException" +// The operation failed because you tried to delete an object that isn't empty. 
+// For example: +// +// * You tried to delete a WebACL that still contains one or more Rule objects. +// +// * You tried to delete a Rule that still contains one or more ByteMatchSet +// objects or other predicates. +// +// * You tried to delete a ByteMatchSet that contains one or more ByteMatchTuple +// objects. +// +// * You tried to delete an IPSet that references one or more IP addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteXssMatchSet +func (c *WAFRegional) DeleteXssMatchSet(input *waf.DeleteXssMatchSetInput) (*waf.DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) return out, req.Send() } -// GetRateBasedRuleWithContext is the same as GetRateBasedRule with the addition of +// DeleteXssMatchSetWithContext is the same as DeleteXssMatchSet with the addition of // the ability to pass a context and additional request options. // -// See GetRateBasedRule for details on how to use this API operation. +// See DeleteXssMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetRateBasedRuleWithContext(ctx aws.Context, input *waf.GetRateBasedRuleInput, opts ...request.Option) (*waf.GetRateBasedRuleOutput, error) { - req, out := c.GetRateBasedRuleRequest(input) +func (c *WAFRegional) DeleteXssMatchSetWithContext(ctx aws.Context, input *waf.DeleteXssMatchSetInput, opts ...request.Option) (*waf.DeleteXssMatchSetOutput, error) { + req, out := c.DeleteXssMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRateBasedRuleManagedKeys = "GetRateBasedRuleManagedKeys" +const opDisassociateWebACL = "DisassociateWebACL" -// GetRateBasedRuleManagedKeysRequest generates a "aws/request.Request" representing the -// client's request for the GetRateBasedRuleManagedKeys operation. The "output" return +// DisassociateWebACLRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateWebACL operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRateBasedRuleManagedKeys for more information on using the GetRateBasedRuleManagedKeys +// See DisassociateWebACL for more information on using the DisassociateWebACL // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRateBasedRuleManagedKeysRequest method. -// req, resp := client.GetRateBasedRuleManagedKeysRequest(params) +// // Example sending a request using the DisassociateWebACLRequest method. 
+// req, resp := client.DisassociateWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRuleManagedKeys -func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBasedRuleManagedKeysInput) (req *request.Request, output *waf.GetRateBasedRuleManagedKeysOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACL +func (c *WAFRegional) DisassociateWebACLRequest(input *DisassociateWebACLInput) (req *request.Request, output *DisassociateWebACLOutput) { op := &request.Operation{ - Name: opGetRateBasedRuleManagedKeys, + Name: opDisassociateWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetRateBasedRuleManagedKeysInput{} + input = &DisassociateWebACLInput{} } - output = &waf.GetRateBasedRuleManagedKeysOutput{} + output = &DisassociateWebACLOutput{} req = c.newRequest(op, input, output) return } -// GetRateBasedRuleManagedKeys API operation for AWS WAF Regional. +// DisassociateWebACL API operation for AWS WAF Regional. // -// Returns an array of IP addresses currently being blocked by the RateBasedRule -// that is specified by the RuleId. The maximum number of managed keys that -// will be blocked is 10,000. If more than 10,000 addresses exceed the rate -// limit, the 10,000 addresses with the highest rates will be blocked. +// Removes a web ACL from the specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetRateBasedRuleManagedKeys for usage and error information. +// API operation DisassociateWebACL for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -2997,9 +3169,6 @@ func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBased // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// // * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" // The operation failed because AWS WAF didn't recognize a parameter in the // request. For example: @@ -3021,7 +3190,7 @@ func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBased // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -3029,81 +3198,83 @@ func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBased // * Your request references an ARN that is malformed, or corresponds to // a resource with which a web ACL cannot be associated. 
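Unlike most operations in this file, DisassociateWebACL takes a regional-specific input type defined in this package rather than in service/waf, and needs only the ARN of the resource; the web ACL to remove is implied. A minimal sketch, assuming the resource is an Application Load Balancer previously associated via AssociateWebACL; the ARN value is a placeholder:

func disassociateWebACL(client *wafregional.WAFRegional, resourceArn string) error {
	// Only the resource ARN is required; no change token is involved here.
	_, err := client.DisassociateWebACL(&wafregional.DisassociateWebACLInput{
		ResourceArn: aws.String(resourceArn), // e.g. an ALB ARN (placeholder)
	})
	return err
}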
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRuleManagedKeys -func (c *WAFRegional) GetRateBasedRuleManagedKeys(input *waf.GetRateBasedRuleManagedKeysInput) (*waf.GetRateBasedRuleManagedKeysOutput, error) { - req, out := c.GetRateBasedRuleManagedKeysRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACL +func (c *WAFRegional) DisassociateWebACL(input *DisassociateWebACLInput) (*DisassociateWebACLOutput, error) { + req, out := c.DisassociateWebACLRequest(input) return out, req.Send() } -// GetRateBasedRuleManagedKeysWithContext is the same as GetRateBasedRuleManagedKeys with the addition of +// DisassociateWebACLWithContext is the same as DisassociateWebACL with the addition of // the ability to pass a context and additional request options. // -// See GetRateBasedRuleManagedKeys for details on how to use this API operation. +// See DisassociateWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetRateBasedRuleManagedKeysWithContext(ctx aws.Context, input *waf.GetRateBasedRuleManagedKeysInput, opts ...request.Option) (*waf.GetRateBasedRuleManagedKeysOutput, error) { - req, out := c.GetRateBasedRuleManagedKeysRequest(input) +func (c *WAFRegional) DisassociateWebACLWithContext(ctx aws.Context, input *DisassociateWebACLInput, opts ...request.Option) (*DisassociateWebACLOutput, error) { + req, out := c.DisassociateWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRule = "GetRule" +const opGetByteMatchSet = "GetByteMatchSet" -// GetRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetRule operation. The "output" return +// GetByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetByteMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRule for more information on using the GetRule +// See GetByteMatchSet for more information on using the GetByteMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRuleRequest method. -// req, resp := client.GetRuleRequest(params) +// // Example sending a request using the GetByteMatchSetRequest method. 
+// req, resp := client.GetByteMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRule -func (c *WAFRegional) GetRuleRequest(input *waf.GetRuleInput) (req *request.Request, output *waf.GetRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetByteMatchSet +func (c *WAFRegional) GetByteMatchSetRequest(input *waf.GetByteMatchSetInput) (req *request.Request, output *waf.GetByteMatchSetOutput) { op := &request.Operation{ - Name: opGetRule, + Name: opGetByteMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetRuleInput{} + input = &waf.GetByteMatchSetInput{} } - output = &waf.GetRuleOutput{} + output = &waf.GetByteMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetRule API operation for AWS WAF Regional. +// GetByteMatchSet API operation for AWS WAF Regional. // -// Returns the Rule that is specified by the RuleId that you included in the -// GetRule request. +// Returns the ByteMatchSet specified by ByteMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetRule for usage and error information. +// API operation GetByteMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3117,260 +3288,267 @@ func (c *WAFRegional) GetRuleRequest(input *waf.GetRuleInput) (req *request.Requ // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRule -func (c *WAFRegional) GetRule(input *waf.GetRuleInput) (*waf.GetRuleOutput, error) { - req, out := c.GetRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetByteMatchSet +func (c *WAFRegional) GetByteMatchSet(input *waf.GetByteMatchSetInput) (*waf.GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) return out, req.Send() } -// GetRuleWithContext is the same as GetRule with the addition of +// GetByteMatchSetWithContext is the same as GetByteMatchSet with the addition of // the ability to pass a context and additional request options. // -// See GetRule for details on how to use this API operation. +// See GetByteMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetRuleWithContext(ctx aws.Context, input *waf.GetRuleInput, opts ...request.Option) (*waf.GetRuleOutput, error) { - req, out := c.GetRuleRequest(input) +func (c *WAFRegional) GetByteMatchSetWithContext(ctx aws.Context, input *waf.GetByteMatchSetInput, opts ...request.Option) (*waf.GetByteMatchSetOutput, error) { + req, out := c.GetByteMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetSampledRequests = "GetSampledRequests" +const opGetChangeToken = "GetChangeToken" -// GetSampledRequestsRequest generates a "aws/request.Request" representing the -// client's request for the GetSampledRequests operation. The "output" return +// GetChangeTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeToken operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSampledRequests for more information on using the GetSampledRequests +// See GetChangeToken for more information on using the GetChangeToken // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSampledRequestsRequest method. -// req, resp := client.GetSampledRequestsRequest(params) +// // Example sending a request using the GetChangeTokenRequest method. +// req, resp := client.GetChangeTokenRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSampledRequests -func (c *WAFRegional) GetSampledRequestsRequest(input *waf.GetSampledRequestsInput) (req *request.Request, output *waf.GetSampledRequestsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeToken +func (c *WAFRegional) GetChangeTokenRequest(input *waf.GetChangeTokenInput) (req *request.Request, output *waf.GetChangeTokenOutput) { op := &request.Operation{ - Name: opGetSampledRequests, + Name: opGetChangeToken, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetSampledRequestsInput{} + input = &waf.GetChangeTokenInput{} } - output = &waf.GetSampledRequestsOutput{} + output = &waf.GetChangeTokenOutput{} req = c.newRequest(op, input, output) return } -// GetSampledRequests API operation for AWS WAF Regional. +// GetChangeToken API operation for AWS WAF Regional. // -// Gets detailed information about a specified number of requests--a sample--that -// AWS WAF randomly selects from among the first 5,000 requests that your AWS -// resource received during a time range that you choose. You can specify a -// sample size of up to 500 requests, and you can specify any time range in -// the previous three hours. +// When you want to create, update, or delete AWS WAF objects, get a change +// token and include the change token in the create, update, or delete request. +// Change tokens ensure that your application doesn't submit conflicting requests +// to AWS WAF. // -// GetSampledRequests returns a time range, which is usually the time range -// that you specified. However, if your resource (such as a CloudFront distribution) -// received 5,000 requests before the specified time range elapsed, GetSampledRequests -// returns an updated time range. This new time range indicates the actual period -// during which AWS WAF selected the requests in the sample. +// Each create, update, or delete request must use a unique change token. 
If +// your application submits a GetChangeToken request and then submits a second +// GetChangeToken request before submitting a create, update, or delete request, +// the second GetChangeToken request returns the same value as the first GetChangeToken +// request. +// +// When you use a change token in a create, update, or delete request, the status +// of the change token changes to PENDING, which indicates that AWS WAF is propagating +// the change to all AWS WAF servers. Use GetChangeTokenStatus to determine +// the status of your change token. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetSampledRequests for usage and error information. +// API operation GetChangeToken for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSampledRequests -func (c *WAFRegional) GetSampledRequests(input *waf.GetSampledRequestsInput) (*waf.GetSampledRequestsOutput, error) { - req, out := c.GetSampledRequestsRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeToken +func (c *WAFRegional) GetChangeToken(input *waf.GetChangeTokenInput) (*waf.GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) return out, req.Send() } -// GetSampledRequestsWithContext is the same as GetSampledRequests with the addition of +// GetChangeTokenWithContext is the same as GetChangeToken with the addition of // the ability to pass a context and additional request options. // -// See GetSampledRequests for details on how to use this API operation. +// See GetChangeToken for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetSampledRequestsWithContext(ctx aws.Context, input *waf.GetSampledRequestsInput, opts ...request.Option) (*waf.GetSampledRequestsOutput, error) { - req, out := c.GetSampledRequestsRequest(input) +func (c *WAFRegional) GetChangeTokenWithContext(ctx aws.Context, input *waf.GetChangeTokenInput, opts ...request.Option) (*waf.GetChangeTokenOutput, error) { + req, out := c.GetChangeTokenRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSizeConstraintSet = "GetSizeConstraintSet" +const opGetChangeTokenStatus = "GetChangeTokenStatus" -// GetSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the GetSizeConstraintSet operation. The "output" return +// GetChangeTokenStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeTokenStatus operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSizeConstraintSet for more information on using the GetSizeConstraintSet +// See GetChangeTokenStatus for more information on using the GetChangeTokenStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSizeConstraintSetRequest method. -// req, resp := client.GetSizeConstraintSetRequest(params) +// // Example sending a request using the GetChangeTokenStatusRequest method. +// req, resp := client.GetChangeTokenStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSizeConstraintSet -func (c *WAFRegional) GetSizeConstraintSetRequest(input *waf.GetSizeConstraintSetInput) (req *request.Request, output *waf.GetSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeTokenStatus +func (c *WAFRegional) GetChangeTokenStatusRequest(input *waf.GetChangeTokenStatusInput) (req *request.Request, output *waf.GetChangeTokenStatusOutput) { op := &request.Operation{ - Name: opGetSizeConstraintSet, + Name: opGetChangeTokenStatus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetSizeConstraintSetInput{} + input = &waf.GetChangeTokenStatusInput{} } - output = &waf.GetSizeConstraintSetOutput{} + output = &waf.GetChangeTokenStatusOutput{} req = c.newRequest(op, input, output) return } -// GetSizeConstraintSet API operation for AWS WAF Regional. +// GetChangeTokenStatus API operation for AWS WAF Regional. // -// Returns the SizeConstraintSet specified by SizeConstraintSetId. +// Returns the status of a ChangeToken that you got by calling GetChangeToken. +// ChangeTokenStatus is one of the following values: +// +// * PROVISIONED: You requested the change token by calling GetChangeToken, +// but you haven't used it yet in a call to create, update, or delete an +// AWS WAF object. +// +// * PENDING: AWS WAF is propagating the create, update, or delete request +// to all AWS WAF servers. +// +// * IN_SYNC: Propagation is complete. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetSizeConstraintSet for usage and error information. +// API operation GetChangeTokenStatus for usage and error information. // // Returned Error Codes: +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. -// -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. 
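The PROVISIONED, PENDING, IN_SYNC lifecycle described above lends itself to a simple poll. A sketch that waits for propagation to finish, assuming the waf package's generated ChangeTokenStatusInsync constant and a two-second interval (both assumptions of this sketch); imports are as in the earlier sketches plus the standard time package:

func waitForChangeTokenInSync(client *wafregional.WAFRegional, token *string) error {
	for {
		out, err := client.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{ChangeToken: token})
		if err != nil {
			return err
		}
		// INSYNC means propagation to all AWS WAF servers is complete.
		if aws.StringValue(out.ChangeTokenStatus) == waf.ChangeTokenStatusInsync {
			return nil
		}
		time.Sleep(2 * time.Second) // illustrative poll interval
	}
}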
-// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSizeConstraintSet -func (c *WAFRegional) GetSizeConstraintSet(input *waf.GetSizeConstraintSetInput) (*waf.GetSizeConstraintSetOutput, error) { - req, out := c.GetSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetChangeTokenStatus +func (c *WAFRegional) GetChangeTokenStatus(input *waf.GetChangeTokenStatusInput) (*waf.GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) return out, req.Send() } -// GetSizeConstraintSetWithContext is the same as GetSizeConstraintSet with the addition of +// GetChangeTokenStatusWithContext is the same as GetChangeTokenStatus with the addition of // the ability to pass a context and additional request options. // -// See GetSizeConstraintSet for details on how to use this API operation. +// See GetChangeTokenStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetSizeConstraintSetWithContext(ctx aws.Context, input *waf.GetSizeConstraintSetInput, opts ...request.Option) (*waf.GetSizeConstraintSetOutput, error) { - req, out := c.GetSizeConstraintSetRequest(input) +func (c *WAFRegional) GetChangeTokenStatusWithContext(ctx aws.Context, input *waf.GetChangeTokenStatusInput, opts ...request.Option) (*waf.GetChangeTokenStatusOutput, error) { + req, out := c.GetChangeTokenStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" +const opGetGeoMatchSet = "GetGeoMatchSet" -// GetSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the GetSqlInjectionMatchSet operation. The "output" return +// GetGeoMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetGeoMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetSqlInjectionMatchSet for more information on using the GetSqlInjectionMatchSet +// See GetGeoMatchSet for more information on using the GetGeoMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetSqlInjectionMatchSetRequest method. -// req, resp := client.GetSqlInjectionMatchSetRequest(params) +// // Example sending a request using the GetGeoMatchSetRequest method. 
+// req, resp := client.GetGeoMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSqlInjectionMatchSet -func (c *WAFRegional) GetSqlInjectionMatchSetRequest(input *waf.GetSqlInjectionMatchSetInput) (req *request.Request, output *waf.GetSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetGeoMatchSet +func (c *WAFRegional) GetGeoMatchSetRequest(input *waf.GetGeoMatchSetInput) (req *request.Request, output *waf.GetGeoMatchSetOutput) { op := &request.Operation{ - Name: opGetSqlInjectionMatchSet, + Name: opGetGeoMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetSqlInjectionMatchSetInput{} + input = &waf.GetGeoMatchSetInput{} } - output = &waf.GetSqlInjectionMatchSetOutput{} + output = &waf.GetGeoMatchSetOutput{} req = c.newRequest(op, input, output) return } -// GetSqlInjectionMatchSet API operation for AWS WAF Regional. +// GetGeoMatchSet API operation for AWS WAF Regional. // -// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. +// Returns the GeoMatchSet that is specified by GeoMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetSqlInjectionMatchSet for usage and error information. +// API operation GetGeoMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3384,80 +3562,80 @@ func (c *WAFRegional) GetSqlInjectionMatchSetRequest(input *waf.GetSqlInjectionM // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSqlInjectionMatchSet -func (c *WAFRegional) GetSqlInjectionMatchSet(input *waf.GetSqlInjectionMatchSetInput) (*waf.GetSqlInjectionMatchSetOutput, error) { - req, out := c.GetSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetGeoMatchSet +func (c *WAFRegional) GetGeoMatchSet(input *waf.GetGeoMatchSetInput) (*waf.GetGeoMatchSetOutput, error) { + req, out := c.GetGeoMatchSetRequest(input) return out, req.Send() } -// GetSqlInjectionMatchSetWithContext is the same as GetSqlInjectionMatchSet with the addition of +// GetGeoMatchSetWithContext is the same as GetGeoMatchSet with the addition of // the ability to pass a context and additional request options. // -// See GetSqlInjectionMatchSet for details on how to use this API operation. +// See GetGeoMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
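// A short sketch of the context plumbing described above (assumptions for
// illustration: a configured client svc, a hypothetical GeoMatchSetId, and the
// standard library context and time packages; a std context.Context satisfies
// aws.Context):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    out, err := svc.GetGeoMatchSetWithContext(ctx, &waf.GetGeoMatchSetInput{
//        GeoMatchSetId: aws.String("example-geo-match-set-id"), // hypothetical ID
//    })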
-func (c *WAFRegional) GetSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.GetSqlInjectionMatchSetInput, opts ...request.Option) (*waf.GetSqlInjectionMatchSetOutput, error) { - req, out := c.GetSqlInjectionMatchSetRequest(input) +func (c *WAFRegional) GetGeoMatchSetWithContext(ctx aws.Context, input *waf.GetGeoMatchSetInput, opts ...request.Option) (*waf.GetGeoMatchSetOutput, error) { + req, out := c.GetGeoMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetWebACL = "GetWebACL" +const opGetIPSet = "GetIPSet" -// GetWebACLRequest generates a "aws/request.Request" representing the -// client's request for the GetWebACL operation. The "output" return +// GetIPSetRequest generates a "aws/request.Request" representing the +// client's request for the GetIPSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetWebACL for more information on using the GetWebACL +// See GetIPSet for more information on using the GetIPSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetWebACLRequest method. -// req, resp := client.GetWebACLRequest(params) +// // Example sending a request using the GetIPSetRequest method. +// req, resp := client.GetIPSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACL -func (c *WAFRegional) GetWebACLRequest(input *waf.GetWebACLInput) (req *request.Request, output *waf.GetWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetIPSet +func (c *WAFRegional) GetIPSetRequest(input *waf.GetIPSetInput) (req *request.Request, output *waf.GetIPSetOutput) { op := &request.Operation{ - Name: opGetWebACL, + Name: opGetIPSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetWebACLInput{} + input = &waf.GetIPSetInput{} } - output = &waf.GetWebACLOutput{} + output = &waf.GetIPSetOutput{} req = c.newRequest(op, input, output) return } -// GetWebACL API operation for AWS WAF Regional. +// GetIPSet API operation for AWS WAF Regional. // -// Returns the WebACL that is specified by WebACLId. +// Returns the IPSet that is specified by IPSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetWebACL for usage and error information. +// API operation GetIPSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3471,80 +3649,81 @@ func (c *WAFRegional) GetWebACLRequest(input *waf.GetWebACLInput) (req *request. // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACL -func (c *WAFRegional) GetWebACL(input *waf.GetWebACLInput) (*waf.GetWebACLOutput, error) { - req, out := c.GetWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetIPSet +func (c *WAFRegional) GetIPSet(input *waf.GetIPSetInput) (*waf.GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) return out, req.Send() } -// GetWebACLWithContext is the same as GetWebACL with the addition of +// GetIPSetWithContext is the same as GetIPSet with the addition of // the ability to pass a context and additional request options. // -// See GetWebACL for details on how to use this API operation. +// See GetIPSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetWebACLWithContext(ctx aws.Context, input *waf.GetWebACLInput, opts ...request.Option) (*waf.GetWebACLOutput, error) { - req, out := c.GetWebACLRequest(input) +func (c *WAFRegional) GetIPSetWithContext(ctx aws.Context, input *waf.GetIPSetInput, opts ...request.Option) (*waf.GetIPSetOutput, error) { + req, out := c.GetIPSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetWebACLForResource = "GetWebACLForResource" +const opGetRateBasedRule = "GetRateBasedRule" -// GetWebACLForResourceRequest generates a "aws/request.Request" representing the -// client's request for the GetWebACLForResource operation. The "output" return +// GetRateBasedRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetRateBasedRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetWebACLForResource for more information on using the GetWebACLForResource +// See GetRateBasedRule for more information on using the GetRateBasedRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetWebACLForResourceRequest method. -// req, resp := client.GetWebACLForResourceRequest(params) +// // Example sending a request using the GetRateBasedRuleRequest method. 
+// req, resp := client.GetRateBasedRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResource -func (c *WAFRegional) GetWebACLForResourceRequest(input *GetWebACLForResourceInput) (req *request.Request, output *GetWebACLForResourceOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRule +func (c *WAFRegional) GetRateBasedRuleRequest(input *waf.GetRateBasedRuleInput) (req *request.Request, output *waf.GetRateBasedRuleOutput) { op := &request.Operation{ - Name: opGetWebACLForResource, + Name: opGetRateBasedRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetWebACLForResourceInput{} + input = &waf.GetRateBasedRuleInput{} } - output = &GetWebACLForResourceOutput{} + output = &waf.GetRateBasedRuleOutput{} req = c.newRequest(op, input, output) return } -// GetWebACLForResource API operation for AWS WAF Regional. +// GetRateBasedRule API operation for AWS WAF Regional. // -// Returns the web ACL for the specified resource. +// Returns the RateBasedRule that is specified by the RuleId that you included +// in the GetRateBasedRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetWebACLForResource for usage and error information. +// API operation GetRateBasedRule for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3558,113 +3737,83 @@ func (c *WAFRegional) GetWebACLForResourceRequest(input *GetWebACLForResourceInp // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeWAFUnavailableEntityException "WAFUnavailableEntityException" -// The operation failed because the entity referenced is temporarily unavailable. -// Retry your request. 
-// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResource -func (c *WAFRegional) GetWebACLForResource(input *GetWebACLForResourceInput) (*GetWebACLForResourceOutput, error) { - req, out := c.GetWebACLForResourceRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRule +func (c *WAFRegional) GetRateBasedRule(input *waf.GetRateBasedRuleInput) (*waf.GetRateBasedRuleOutput, error) { + req, out := c.GetRateBasedRuleRequest(input) return out, req.Send() } -// GetWebACLForResourceWithContext is the same as GetWebACLForResource with the addition of +// GetRateBasedRuleWithContext is the same as GetRateBasedRule with the addition of // the ability to pass a context and additional request options. // -// See GetWebACLForResource for details on how to use this API operation. +// See GetRateBasedRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetWebACLForResourceWithContext(ctx aws.Context, input *GetWebACLForResourceInput, opts ...request.Option) (*GetWebACLForResourceOutput, error) { - req, out := c.GetWebACLForResourceRequest(input) +func (c *WAFRegional) GetRateBasedRuleWithContext(ctx aws.Context, input *waf.GetRateBasedRuleInput, opts ...request.Option) (*waf.GetRateBasedRuleOutput, error) { + req, out := c.GetRateBasedRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetXssMatchSet = "GetXssMatchSet" +const opGetRateBasedRuleManagedKeys = "GetRateBasedRuleManagedKeys" -// GetXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the GetXssMatchSet operation. The "output" return +// GetRateBasedRuleManagedKeysRequest generates a "aws/request.Request" representing the +// client's request for the GetRateBasedRuleManagedKeys operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetXssMatchSet for more information on using the GetXssMatchSet +// See GetRateBasedRuleManagedKeys for more information on using the GetRateBasedRuleManagedKeys // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetXssMatchSetRequest method. -// req, resp := client.GetXssMatchSetRequest(params) +// // Example sending a request using the GetRateBasedRuleManagedKeysRequest method. 
+// req, resp := client.GetRateBasedRuleManagedKeysRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetXssMatchSet -func (c *WAFRegional) GetXssMatchSetRequest(input *waf.GetXssMatchSetInput) (req *request.Request, output *waf.GetXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRuleManagedKeys +func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBasedRuleManagedKeysInput) (req *request.Request, output *waf.GetRateBasedRuleManagedKeysOutput) { op := &request.Operation{ - Name: opGetXssMatchSet, + Name: opGetRateBasedRuleManagedKeys, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.GetXssMatchSetInput{} + input = &waf.GetRateBasedRuleManagedKeysInput{} } - output = &waf.GetXssMatchSetOutput{} + output = &waf.GetRateBasedRuleManagedKeysOutput{} req = c.newRequest(op, input, output) return } -// GetXssMatchSet API operation for AWS WAF Regional. +// GetRateBasedRuleManagedKeys API operation for AWS WAF Regional. // -// Returns the XssMatchSet that is specified by XssMatchSetId. +// Returns an array of IP addresses currently being blocked by the RateBasedRule +// that is specified by the RuleId. The maximum number of managed keys that +// will be blocked is 10,000. If more than 10,000 addresses exceed the rate +// limit, the 10,000 addresses with the highest rates will be blocked. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation GetXssMatchSet for usage and error information. +// API operation GetRateBasedRuleManagedKeys for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3678,80 +3827,109 @@ func (c *WAFRegional) GetXssMatchSetRequest(input *waf.GetXssMatchSetInput) (req // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetXssMatchSet -func (c *WAFRegional) GetXssMatchSet(input *waf.GetXssMatchSetInput) (*waf.GetXssMatchSetOutput, error) { - req, out := c.GetXssMatchSetRequest(input) +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. 
+// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRateBasedRuleManagedKeys +func (c *WAFRegional) GetRateBasedRuleManagedKeys(input *waf.GetRateBasedRuleManagedKeysInput) (*waf.GetRateBasedRuleManagedKeysOutput, error) { + req, out := c.GetRateBasedRuleManagedKeysRequest(input) return out, req.Send() } -// GetXssMatchSetWithContext is the same as GetXssMatchSet with the addition of +// GetRateBasedRuleManagedKeysWithContext is the same as GetRateBasedRuleManagedKeys with the addition of // the ability to pass a context and additional request options. // -// See GetXssMatchSet for details on how to use this API operation. +// See GetRateBasedRuleManagedKeys for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) GetXssMatchSetWithContext(ctx aws.Context, input *waf.GetXssMatchSetInput, opts ...request.Option) (*waf.GetXssMatchSetOutput, error) { - req, out := c.GetXssMatchSetRequest(input) +func (c *WAFRegional) GetRateBasedRuleManagedKeysWithContext(ctx aws.Context, input *waf.GetRateBasedRuleManagedKeysInput, opts ...request.Option) (*waf.GetRateBasedRuleManagedKeysOutput, error) { + req, out := c.GetRateBasedRuleManagedKeysRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListByteMatchSets = "ListByteMatchSets" +const opGetRegexMatchSet = "GetRegexMatchSet" -// ListByteMatchSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListByteMatchSets operation. The "output" return +// GetRegexMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetRegexMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListByteMatchSets for more information on using the ListByteMatchSets +// See GetRegexMatchSet for more information on using the GetRegexMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListByteMatchSetsRequest method. -// req, resp := client.ListByteMatchSetsRequest(params) +// // Example sending a request using the GetRegexMatchSetRequest method. 
+// req, resp := client.GetRegexMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListByteMatchSets -func (c *WAFRegional) ListByteMatchSetsRequest(input *waf.ListByteMatchSetsInput) (req *request.Request, output *waf.ListByteMatchSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRegexMatchSet +func (c *WAFRegional) GetRegexMatchSetRequest(input *waf.GetRegexMatchSetInput) (req *request.Request, output *waf.GetRegexMatchSetOutput) { op := &request.Operation{ - Name: opListByteMatchSets, + Name: opGetRegexMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListByteMatchSetsInput{} + input = &waf.GetRegexMatchSetInput{} } - output = &waf.ListByteMatchSetsOutput{} + output = &waf.GetRegexMatchSetOutput{} req = c.newRequest(op, input, output) return } -// ListByteMatchSets API operation for AWS WAF Regional. +// GetRegexMatchSet API operation for AWS WAF Regional. // -// Returns an array of ByteMatchSetSummary objects. +// Returns the RegexMatchSet specified by RegexMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListByteMatchSets for usage and error information. +// API operation GetRegexMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3762,80 +3940,83 @@ func (c *WAFRegional) ListByteMatchSetsRequest(input *waf.ListByteMatchSetsInput // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListByteMatchSets -func (c *WAFRegional) ListByteMatchSets(input *waf.ListByteMatchSetsInput) (*waf.ListByteMatchSetsOutput, error) { - req, out := c.ListByteMatchSetsRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRegexMatchSet +func (c *WAFRegional) GetRegexMatchSet(input *waf.GetRegexMatchSetInput) (*waf.GetRegexMatchSetOutput, error) { + req, out := c.GetRegexMatchSetRequest(input) return out, req.Send() } -// ListByteMatchSetsWithContext is the same as ListByteMatchSets with the addition of +// GetRegexMatchSetWithContext is the same as GetRegexMatchSet with the addition of // the ability to pass a context and additional request options. // -// See ListByteMatchSets for details on how to use this API operation. +// See GetRegexMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
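// For illustration, a hedged one-call sketch of the synchronous form (svc is
// an assumed, configured client; the RegexMatchSetId value is hypothetical):
//
//    out, err := svc.GetRegexMatchSet(&waf.GetRegexMatchSetInput{
//        RegexMatchSetId: aws.String("example-regex-match-set-id"),
//    })
//    if err == nil {
//        fmt.Println(out.RegexMatchSet) // the set's tuples and metadata
//    }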
-func (c *WAFRegional) ListByteMatchSetsWithContext(ctx aws.Context, input *waf.ListByteMatchSetsInput, opts ...request.Option) (*waf.ListByteMatchSetsOutput, error) { - req, out := c.ListByteMatchSetsRequest(input) +func (c *WAFRegional) GetRegexMatchSetWithContext(ctx aws.Context, input *waf.GetRegexMatchSetInput, opts ...request.Option) (*waf.GetRegexMatchSetOutput, error) { + req, out := c.GetRegexMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListIPSets = "ListIPSets" +const opGetRegexPatternSet = "GetRegexPatternSet" -// ListIPSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListIPSets operation. The "output" return +// GetRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the GetRegexPatternSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListIPSets for more information on using the ListIPSets +// See GetRegexPatternSet for more information on using the GetRegexPatternSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListIPSetsRequest method. -// req, resp := client.ListIPSetsRequest(params) +// // Example sending a request using the GetRegexPatternSetRequest method. +// req, resp := client.GetRegexPatternSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListIPSets -func (c *WAFRegional) ListIPSetsRequest(input *waf.ListIPSetsInput) (req *request.Request, output *waf.ListIPSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRegexPatternSet +func (c *WAFRegional) GetRegexPatternSetRequest(input *waf.GetRegexPatternSetInput) (req *request.Request, output *waf.GetRegexPatternSetOutput) { op := &request.Operation{ - Name: opListIPSets, + Name: opGetRegexPatternSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListIPSetsInput{} + input = &waf.GetRegexPatternSetInput{} } - output = &waf.ListIPSetsOutput{} + output = &waf.GetRegexPatternSetOutput{} req = c.newRequest(op, input, output) return } -// ListIPSets API operation for AWS WAF Regional. +// GetRegexPatternSet API operation for AWS WAF Regional. // -// Returns an array of IPSetSummary objects in the response. +// Returns the RegexPatternSet specified by RegexPatternSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListIPSets for usage and error information. +// API operation GetRegexPatternSet for usage and error information. 
// // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3846,80 +4027,84 @@ func (c *WAFRegional) ListIPSetsRequest(input *waf.ListIPSetsInput) (req *reques // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListIPSets -func (c *WAFRegional) ListIPSets(input *waf.ListIPSetsInput) (*waf.ListIPSetsOutput, error) { - req, out := c.ListIPSetsRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRegexPatternSet +func (c *WAFRegional) GetRegexPatternSet(input *waf.GetRegexPatternSetInput) (*waf.GetRegexPatternSetOutput, error) { + req, out := c.GetRegexPatternSetRequest(input) return out, req.Send() } -// ListIPSetsWithContext is the same as ListIPSets with the addition of +// GetRegexPatternSetWithContext is the same as GetRegexPatternSet with the addition of // the ability to pass a context and additional request options. // -// See ListIPSets for details on how to use this API operation. +// See GetRegexPatternSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) ListIPSetsWithContext(ctx aws.Context, input *waf.ListIPSetsInput, opts ...request.Option) (*waf.ListIPSetsOutput, error) { - req, out := c.ListIPSetsRequest(input) +func (c *WAFRegional) GetRegexPatternSetWithContext(ctx aws.Context, input *waf.GetRegexPatternSetInput, opts ...request.Option) (*waf.GetRegexPatternSetOutput, error) { + req, out := c.GetRegexPatternSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListRateBasedRules = "ListRateBasedRules" +const opGetRule = "GetRule" -// ListRateBasedRulesRequest generates a "aws/request.Request" representing the -// client's request for the ListRateBasedRules operation. The "output" return +// GetRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetRule operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRateBasedRules for more information on using the ListRateBasedRules +// See GetRule for more information on using the GetRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRateBasedRulesRequest method. -// req, resp := client.ListRateBasedRulesRequest(params) +// // Example sending a request using the GetRuleRequest method. 
+// req, resp := client.GetRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRateBasedRules -func (c *WAFRegional) ListRateBasedRulesRequest(input *waf.ListRateBasedRulesInput) (req *request.Request, output *waf.ListRateBasedRulesOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRule +func (c *WAFRegional) GetRuleRequest(input *waf.GetRuleInput) (req *request.Request, output *waf.GetRuleOutput) { op := &request.Operation{ - Name: opListRateBasedRules, + Name: opGetRule, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListRateBasedRulesInput{} + input = &waf.GetRuleInput{} } - output = &waf.ListRateBasedRulesOutput{} + output = &waf.GetRuleOutput{} req = c.newRequest(op, input, output) return } -// ListRateBasedRules API operation for AWS WAF Regional. +// GetRule API operation for AWS WAF Regional. // -// Returns an array of RuleSummary objects. +// Returns the Rule that is specified by the RuleId that you included in the +// GetRule request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListRateBasedRules for usage and error information. +// API operation GetRule for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -3930,167 +4115,176 @@ func (c *WAFRegional) ListRateBasedRulesRequest(input *waf.ListRateBasedRulesInp // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRateBasedRules -func (c *WAFRegional) ListRateBasedRules(input *waf.ListRateBasedRulesInput) (*waf.ListRateBasedRulesOutput, error) { - req, out := c.ListRateBasedRulesRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetRule +func (c *WAFRegional) GetRule(input *waf.GetRuleInput) (*waf.GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) return out, req.Send() } -// ListRateBasedRulesWithContext is the same as ListRateBasedRules with the addition of +// GetRuleWithContext is the same as GetRule with the addition of // the ability to pass a context and additional request options. // -// See ListRateBasedRules for details on how to use this API operation. +// See GetRule for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
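// A sketch of the awserr runtime type assertion these docs recommend for
// inspecting service errors (assumptions: a configured client svc and a
// hypothetical RuleId; the error-code string is taken from the codes listed
// in these docs):
//
//    out, err := svc.GetRule(&waf.GetRuleInput{RuleId: aws.String("example-rule-id")})
//    if awsErr, ok := err.(awserr.Error); ok {
//        if awsErr.Code() == "WAFNonexistentItemException" {
//            // the referenced Rule does not exist
//        }
//    }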
-func (c *WAFRegional) ListRateBasedRulesWithContext(ctx aws.Context, input *waf.ListRateBasedRulesInput, opts ...request.Option) (*waf.ListRateBasedRulesOutput, error) { - req, out := c.ListRateBasedRulesRequest(input) +func (c *WAFRegional) GetRuleWithContext(ctx aws.Context, input *waf.GetRuleInput, opts ...request.Option) (*waf.GetRuleOutput, error) { + req, out := c.GetRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListResourcesForWebACL = "ListResourcesForWebACL" +const opGetSampledRequests = "GetSampledRequests" -// ListResourcesForWebACLRequest generates a "aws/request.Request" representing the -// client's request for the ListResourcesForWebACL operation. The "output" return +// GetSampledRequestsRequest generates a "aws/request.Request" representing the +// client's request for the GetSampledRequests operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListResourcesForWebACL for more information on using the ListResourcesForWebACL +// See GetSampledRequests for more information on using the GetSampledRequests // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListResourcesForWebACLRequest method. -// req, resp := client.ListResourcesForWebACLRequest(params) +// // Example sending a request using the GetSampledRequestsRequest method. +// req, resp := client.GetSampledRequestsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACL -func (c *WAFRegional) ListResourcesForWebACLRequest(input *ListResourcesForWebACLInput) (req *request.Request, output *ListResourcesForWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSampledRequests +func (c *WAFRegional) GetSampledRequestsRequest(input *waf.GetSampledRequestsInput) (req *request.Request, output *waf.GetSampledRequestsOutput) { op := &request.Operation{ - Name: opListResourcesForWebACL, + Name: opGetSampledRequests, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListResourcesForWebACLInput{} + input = &waf.GetSampledRequestsInput{} } - output = &ListResourcesForWebACLOutput{} + output = &waf.GetSampledRequestsOutput{} req = c.newRequest(op, input, output) return } -// ListResourcesForWebACL API operation for AWS WAF Regional. +// GetSampledRequests API operation for AWS WAF Regional. // -// Returns an array of resources associated with the specified web ACL. +// Gets detailed information about a specified number of requests--a sample--that +// AWS WAF randomly selects from among the first 5,000 requests that your AWS +// resource received during a time range that you choose. You can specify a +// sample size of up to 500 requests, and you can specify any time range in +// the previous three hours. +// +// GetSampledRequests returns a time range, which is usually the time range +// that you specified. 
However, if your resource (such as a CloudFront distribution) +// received 5,000 requests before the specified time range elapsed, GetSampledRequests +// returns an updated time range. This new time range indicates the actual period +// during which AWS WAF selected the requests in the sample. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListResourcesForWebACL for usage and error information. +// API operation GetSampledRequests for usage and error information. // // Returned Error Codes: +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" -// The operation failed because you tried to create, update, or delete an object -// by using an invalid account identifier. -// -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACL -func (c *WAFRegional) ListResourcesForWebACL(input *ListResourcesForWebACLInput) (*ListResourcesForWebACLOutput, error) { - req, out := c.ListResourcesForWebACLRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSampledRequests +func (c *WAFRegional) GetSampledRequests(input *waf.GetSampledRequestsInput) (*waf.GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) return out, req.Send() } -// ListResourcesForWebACLWithContext is the same as ListResourcesForWebACL with the addition of +// GetSampledRequestsWithContext is the same as GetSampledRequests with the addition of // the ability to pass a context and additional request options. // -// See ListResourcesForWebACL for details on how to use this API operation. +// See GetSampledRequests for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) ListResourcesForWebACLWithContext(ctx aws.Context, input *ListResourcesForWebACLInput, opts ...request.Option) (*ListResourcesForWebACLOutput, error) { - req, out := c.ListResourcesForWebACLRequest(input) +func (c *WAFRegional) GetSampledRequestsWithContext(ctx aws.Context, input *waf.GetSampledRequestsInput, opts ...request.Option) (*waf.GetSampledRequestsOutput, error) { + req, out := c.GetSampledRequestsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListRules = "ListRules" +const opGetSizeConstraintSet = "GetSizeConstraintSet" -// ListRulesRequest generates a "aws/request.Request" representing the -// client's request for the ListRules operation. The "output" return +// GetSizeConstraintSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSizeConstraintSet operation. 
The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRules for more information on using the ListRules +// See GetSizeConstraintSet for more information on using the GetSizeConstraintSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRulesRequest method. -// req, resp := client.ListRulesRequest(params) +// // Example sending a request using the GetSizeConstraintSetRequest method. +// req, resp := client.GetSizeConstraintSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRules -func (c *WAFRegional) ListRulesRequest(input *waf.ListRulesInput) (req *request.Request, output *waf.ListRulesOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSizeConstraintSet +func (c *WAFRegional) GetSizeConstraintSetRequest(input *waf.GetSizeConstraintSetInput) (req *request.Request, output *waf.GetSizeConstraintSetOutput) { op := &request.Operation{ - Name: opListRules, + Name: opGetSizeConstraintSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListRulesInput{} + input = &waf.GetSizeConstraintSetInput{} } - output = &waf.ListRulesOutput{} + output = &waf.GetSizeConstraintSetOutput{} req = c.newRequest(op, input, output) return } -// ListRules API operation for AWS WAF Regional. +// GetSizeConstraintSet API operation for AWS WAF Regional. // -// Returns an array of RuleSummary objects. +// Returns the SizeConstraintSet specified by SizeConstraintSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListRules for usage and error information. +// API operation GetSizeConstraintSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -4101,80 +4295,83 @@ func (c *WAFRegional) ListRulesRequest(input *waf.ListRulesInput) (req *request. // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRules -func (c *WAFRegional) ListRules(input *waf.ListRulesInput) (*waf.ListRulesOutput, error) { - req, out := c.ListRulesRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. 
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSizeConstraintSet +func (c *WAFRegional) GetSizeConstraintSet(input *waf.GetSizeConstraintSetInput) (*waf.GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) return out, req.Send() } -// ListRulesWithContext is the same as ListRules with the addition of +// GetSizeConstraintSetWithContext is the same as GetSizeConstraintSet with the addition of // the ability to pass a context and additional request options. // -// See ListRules for details on how to use this API operation. +// See GetSizeConstraintSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) ListRulesWithContext(ctx aws.Context, input *waf.ListRulesInput, opts ...request.Option) (*waf.ListRulesOutput, error) { - req, out := c.ListRulesRequest(input) +func (c *WAFRegional) GetSizeConstraintSetWithContext(ctx aws.Context, input *waf.GetSizeConstraintSetInput, opts ...request.Option) (*waf.GetSizeConstraintSetOutput, error) { + req, out := c.GetSizeConstraintSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListSizeConstraintSets = "ListSizeConstraintSets" +const opGetSqlInjectionMatchSet = "GetSqlInjectionMatchSet" -// ListSizeConstraintSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListSizeConstraintSets operation. The "output" return +// GetSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetSqlInjectionMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListSizeConstraintSets for more information on using the ListSizeConstraintSets +// See GetSqlInjectionMatchSet for more information on using the GetSqlInjectionMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListSizeConstraintSetsRequest method. -// req, resp := client.ListSizeConstraintSetsRequest(params) +// // Example sending a request using the GetSqlInjectionMatchSetRequest method. 
+// req, resp := client.GetSqlInjectionMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSizeConstraintSets -func (c *WAFRegional) ListSizeConstraintSetsRequest(input *waf.ListSizeConstraintSetsInput) (req *request.Request, output *waf.ListSizeConstraintSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSqlInjectionMatchSet +func (c *WAFRegional) GetSqlInjectionMatchSetRequest(input *waf.GetSqlInjectionMatchSetInput) (req *request.Request, output *waf.GetSqlInjectionMatchSetOutput) { op := &request.Operation{ - Name: opListSizeConstraintSets, + Name: opGetSqlInjectionMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListSizeConstraintSetsInput{} + input = &waf.GetSqlInjectionMatchSetInput{} } - output = &waf.ListSizeConstraintSetsOutput{} + output = &waf.GetSqlInjectionMatchSetOutput{} req = c.newRequest(op, input, output) return } -// ListSizeConstraintSets API operation for AWS WAF Regional. +// GetSqlInjectionMatchSet API operation for AWS WAF Regional. // -// Returns an array of SizeConstraintSetSummary objects. +// Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListSizeConstraintSets for usage and error information. +// API operation GetSqlInjectionMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -4185,80 +4382,83 @@ func (c *WAFRegional) ListSizeConstraintSetsRequest(input *waf.ListSizeConstrain // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSizeConstraintSets -func (c *WAFRegional) ListSizeConstraintSets(input *waf.ListSizeConstraintSetsInput) (*waf.ListSizeConstraintSetsOutput, error) { - req, out := c.ListSizeConstraintSetsRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetSqlInjectionMatchSet +func (c *WAFRegional) GetSqlInjectionMatchSet(input *waf.GetSqlInjectionMatchSetInput) (*waf.GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) return out, req.Send() } -// ListSizeConstraintSetsWithContext is the same as ListSizeConstraintSets with the addition of +// GetSqlInjectionMatchSetWithContext is the same as GetSqlInjectionMatchSet with the addition of // the ability to pass a context and additional request options. // -// See ListSizeConstraintSets for details on how to use this API operation. +// See GetSqlInjectionMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
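// A hedged usage sketch (svc is an assumed, configured client; the
// SqlInjectionMatchSetId value is hypothetical):
//
//    out, err := svc.GetSqlInjectionMatchSet(&waf.GetSqlInjectionMatchSetInput{
//        SqlInjectionMatchSetId: aws.String("example-sql-injection-match-set-id"),
//    })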
-func (c *WAFRegional) ListSizeConstraintSetsWithContext(ctx aws.Context, input *waf.ListSizeConstraintSetsInput, opts ...request.Option) (*waf.ListSizeConstraintSetsOutput, error) { - req, out := c.ListSizeConstraintSetsRequest(input) +func (c *WAFRegional) GetSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.GetSqlInjectionMatchSetInput, opts ...request.Option) (*waf.GetSqlInjectionMatchSetOutput, error) { + req, out := c.GetSqlInjectionMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" +const opGetWebACL = "GetWebACL" -// ListSqlInjectionMatchSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListSqlInjectionMatchSets operation. The "output" return +// GetWebACLRequest generates a "aws/request.Request" representing the +// client's request for the GetWebACL operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListSqlInjectionMatchSets for more information on using the ListSqlInjectionMatchSets +// See GetWebACL for more information on using the GetWebACL // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListSqlInjectionMatchSetsRequest method. -// req, resp := client.ListSqlInjectionMatchSetsRequest(params) +// // Example sending a request using the GetWebACLRequest method. +// req, resp := client.GetWebACLRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSqlInjectionMatchSets -func (c *WAFRegional) ListSqlInjectionMatchSetsRequest(input *waf.ListSqlInjectionMatchSetsInput) (req *request.Request, output *waf.ListSqlInjectionMatchSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACL +func (c *WAFRegional) GetWebACLRequest(input *waf.GetWebACLInput) (req *request.Request, output *waf.GetWebACLOutput) { op := &request.Operation{ - Name: opListSqlInjectionMatchSets, + Name: opGetWebACL, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListSqlInjectionMatchSetsInput{} + input = &waf.GetWebACLInput{} } - output = &waf.ListSqlInjectionMatchSetsOutput{} + output = &waf.GetWebACLOutput{} req = c.newRequest(op, input, output) return } -// ListSqlInjectionMatchSets API operation for AWS WAF Regional. +// GetWebACL API operation for AWS WAF Regional. // -// Returns an array of SqlInjectionMatchSet objects. +// Returns the WebACL that is specified by WebACLId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListSqlInjectionMatchSets for usage and error information. +// API operation GetWebACL for usage and error information. 
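// As a sketch of fetching an ACL by ID (assuming a configured client svc and a
// hypothetical WebACLId):
//
//    out, err := svc.GetWebACL(&waf.GetWebACLInput{WebACLId: aws.String("example-web-acl-id")})
//    if err == nil {
//        fmt.Println(out.WebACL.Rules) // the ACL's activated rules
//    }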
// // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -4269,80 +4469,83 @@ func (c *WAFRegional) ListSqlInjectionMatchSetsRequest(input *waf.ListSqlInjecti // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSqlInjectionMatchSets -func (c *WAFRegional) ListSqlInjectionMatchSets(input *waf.ListSqlInjectionMatchSetsInput) (*waf.ListSqlInjectionMatchSetsOutput, error) { - req, out := c.ListSqlInjectionMatchSetsRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACL +func (c *WAFRegional) GetWebACL(input *waf.GetWebACLInput) (*waf.GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) return out, req.Send() } -// ListSqlInjectionMatchSetsWithContext is the same as ListSqlInjectionMatchSets with the addition of +// GetWebACLWithContext is the same as GetWebACL with the addition of // the ability to pass a context and additional request options. // -// See ListSqlInjectionMatchSets for details on how to use this API operation. +// See GetWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) ListSqlInjectionMatchSetsWithContext(ctx aws.Context, input *waf.ListSqlInjectionMatchSetsInput, opts ...request.Option) (*waf.ListSqlInjectionMatchSetsOutput, error) { - req, out := c.ListSqlInjectionMatchSetsRequest(input) +func (c *WAFRegional) GetWebACLWithContext(ctx aws.Context, input *waf.GetWebACLInput, opts ...request.Option) (*waf.GetWebACLOutput, error) { + req, out := c.GetWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListWebACLs = "ListWebACLs" +const opGetWebACLForResource = "GetWebACLForResource" -// ListWebACLsRequest generates a "aws/request.Request" representing the -// client's request for the ListWebACLs operation. The "output" return +// GetWebACLForResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetWebACLForResource operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListWebACLs for more information on using the ListWebACLs +// See GetWebACLForResource for more information on using the GetWebACLForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListWebACLsRequest method. -// req, resp := client.ListWebACLsRequest(params) +// // Example sending a request using the GetWebACLForResourceRequest method. 
+// req, resp := client.GetWebACLForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListWebACLs -func (c *WAFRegional) ListWebACLsRequest(input *waf.ListWebACLsInput) (req *request.Request, output *waf.ListWebACLsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResource +func (c *WAFRegional) GetWebACLForResourceRequest(input *GetWebACLForResourceInput) (req *request.Request, output *GetWebACLForResourceOutput) { op := &request.Operation{ - Name: opListWebACLs, + Name: opGetWebACLForResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListWebACLsInput{} + input = &GetWebACLForResourceInput{} } - output = &waf.ListWebACLsOutput{} + output = &GetWebACLForResourceOutput{} req = c.newRequest(op, input, output) return } -// ListWebACLs API operation for AWS WAF Regional. +// GetWebACLForResource API operation for AWS WAF Regional. // -// Returns an array of WebACLSummary objects in the response. +// Returns the web ACL for the specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListWebACLs for usage and error information. +// API operation GetWebACLForResource for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -4353,80 +4556,116 @@ func (c *WAFRegional) ListWebACLsRequest(input *waf.ListWebACLsInput) (req *requ // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListWebACLs -func (c *WAFRegional) ListWebACLs(input *waf.ListWebACLsInput) (*waf.ListWebACLsOutput, error) { - req, out := c.ListWebACLsRequest(input) - return out, req.Send() -} - -// ListWebACLsWithContext is the same as ListWebACLs with the addition of -// the ability to pass a context and additional request options. +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. // -// See ListWebACLs for details on how to use this API operation. +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *WAFRegional) ListWebACLsWithContext(ctx aws.Context, input *waf.ListWebACLsInput, opts ...request.Option) (*waf.ListWebACLsOutput, error) { - req, out := c.ListWebACLsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListXssMatchSets = "ListXssMatchSets" - -// ListXssMatchSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListXssMatchSets operation. The "output" return +// * You specified an invalid parameter name. +// +// * You specified an invalid value. 
+// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFUnavailableEntityException "WAFUnavailableEntityException" +// The operation failed because the entity referenced is temporarily unavailable. +// Retry your request. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResource +func (c *WAFRegional) GetWebACLForResource(input *GetWebACLForResourceInput) (*GetWebACLForResourceOutput, error) { + req, out := c.GetWebACLForResourceRequest(input) + return out, req.Send() +} + +// GetWebACLForResourceWithContext is the same as GetWebACLForResource with the addition of +// the ability to pass a context and additional request options. +// +// See GetWebACLForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) GetWebACLForResourceWithContext(ctx aws.Context, input *GetWebACLForResourceInput, opts ...request.Option) (*GetWebACLForResourceOutput, error) { + req, out := c.GetWebACLForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetXssMatchSet = "GetXssMatchSet" + +// GetXssMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the GetXssMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListXssMatchSets for more information on using the ListXssMatchSets +// See GetXssMatchSet for more information on using the GetXssMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListXssMatchSetsRequest method. -// req, resp := client.ListXssMatchSetsRequest(params) +// // Example sending a request using the GetXssMatchSetRequest method. 
+// req, resp := client.GetXssMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListXssMatchSets -func (c *WAFRegional) ListXssMatchSetsRequest(input *waf.ListXssMatchSetsInput) (req *request.Request, output *waf.ListXssMatchSetsOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetXssMatchSet +func (c *WAFRegional) GetXssMatchSetRequest(input *waf.GetXssMatchSetInput) (req *request.Request, output *waf.GetXssMatchSetOutput) { op := &request.Operation{ - Name: opListXssMatchSets, + Name: opGetXssMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.ListXssMatchSetsInput{} + input = &waf.GetXssMatchSetInput{} } - output = &waf.ListXssMatchSetsOutput{} + output = &waf.GetXssMatchSetOutput{} req = c.newRequest(op, input, output) return } -// ListXssMatchSets API operation for AWS WAF Regional. +// GetXssMatchSet API operation for AWS WAF Regional. // -// Returns an array of XssMatchSet objects. +// Returns the XssMatchSet that is specified by XssMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation ListXssMatchSets for usage and error information. +// API operation GetXssMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -4437,116 +4676,83 @@ func (c *WAFRegional) ListXssMatchSetsRequest(input *waf.ListXssMatchSetsInput) // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListXssMatchSets -func (c *WAFRegional) ListXssMatchSets(input *waf.ListXssMatchSetsInput) (*waf.ListXssMatchSetsOutput, error) { - req, out := c.ListXssMatchSetsRequest(input) +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetXssMatchSet +func (c *WAFRegional) GetXssMatchSet(input *waf.GetXssMatchSetInput) (*waf.GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) return out, req.Send() } -// ListXssMatchSetsWithContext is the same as ListXssMatchSets with the addition of +// GetXssMatchSetWithContext is the same as GetXssMatchSet with the addition of // the ability to pass a context and additional request options. // -// See ListXssMatchSets for details on how to use this API operation. +// See GetXssMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
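The WithContext variants documented above panic on a nil context, so callers normally derive one with a timeout. A sketch that exercises this with GetWebACLForResource, the regional-only operation added earlier in this hunk; the ARN is a placeholder, and the assumption is that GetWebACLForResourceOutput carries a WebACLSummary field as in the generated code:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := wafregional.New(sess)

	// The context must be non-nil; a nil context panics.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// GetWebACLForResource uses wafregional input/output types rather than
	// waf ones, because the operation has no counterpart in the global API.
	out, err := client.GetWebACLForResourceWithContext(ctx, &wafregional.GetWebACLForResourceInput{
		ResourceArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/app/example/abc123"), // placeholder ARN
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.WebACLSummary) // nil when no web ACL is associated
}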
-func (c *WAFRegional) ListXssMatchSetsWithContext(ctx aws.Context, input *waf.ListXssMatchSetsInput, opts ...request.Option) (*waf.ListXssMatchSetsOutput, error) { - req, out := c.ListXssMatchSetsRequest(input) +func (c *WAFRegional) GetXssMatchSetWithContext(ctx aws.Context, input *waf.GetXssMatchSetInput, opts ...request.Option) (*waf.GetXssMatchSetOutput, error) { + req, out := c.GetXssMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateByteMatchSet = "UpdateByteMatchSet" +const opListByteMatchSets = "ListByteMatchSets" -// UpdateByteMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateByteMatchSet operation. The "output" return +// ListByteMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListByteMatchSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateByteMatchSet for more information on using the UpdateByteMatchSet +// See ListByteMatchSets for more information on using the ListByteMatchSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateByteMatchSetRequest method. -// req, resp := client.UpdateByteMatchSetRequest(params) +// // Example sending a request using the ListByteMatchSetsRequest method. +// req, resp := client.ListByteMatchSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateByteMatchSet -func (c *WAFRegional) UpdateByteMatchSetRequest(input *waf.UpdateByteMatchSetInput) (req *request.Request, output *waf.UpdateByteMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListByteMatchSets +func (c *WAFRegional) ListByteMatchSetsRequest(input *waf.ListByteMatchSetsInput) (req *request.Request, output *waf.ListByteMatchSetsOutput) { op := &request.Operation{ - Name: opUpdateByteMatchSet, + Name: opListByteMatchSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateByteMatchSetInput{} + input = &waf.ListByteMatchSetsInput{} } - output = &waf.UpdateByteMatchSetOutput{} + output = &waf.ListByteMatchSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateByteMatchSet API operation for AWS WAF Regional. -// -// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For -// each ByteMatchTuple object, you specify the following values: -// -// * Whether to insert or delete the object from the array. If you want to -// change a ByteMatchSetUpdate object, you delete the existing object and -// add a new one. -// -// * The part of a web request that you want AWS WAF to inspect, such as -// a query string or the value of the User-Agent header. -// -// * The bytes (typically a string that corresponds with ASCII characters) -// that you want AWS WAF to look for. For more information, including how -// you specify the values for the AWS WAF API and the AWS CLI or SDKs, see -// TargetString in the ByteMatchTuple data type. 
-// -// * Where to look, such as at the beginning or the end of a query string. -// -// * Whether to perform any conversions on the request, such as converting -// it to lowercase, before inspecting it for the specified string. -// -// For example, you can add a ByteMatchSetUpdate object that matches web requests -// in which User-Agent headers contain the string BadBot. You can then configure -// AWS WAF to block those requests. -// -// To create and configure a ByteMatchSet, perform the following steps: -// -// Create a ByteMatchSet. For more information, see CreateByteMatchSet. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateByteMatchSet request. -// -// Submit an UpdateByteMatchSet request to specify the part of the request that -// you want AWS WAF to inspect (for example, the header or the URI) and the -// value that you want AWS WAF to watch for. +// ListByteMatchSets API operation for AWS WAF Regional. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of ByteMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateByteMatchSet for usage and error information. +// API operation ListByteMatchSets for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -4557,213 +4763,82 @@ func (c *WAFRegional) UpdateByteMatchSetRequest(input *waf.UpdateByteMatchSetInp // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. 
-// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: -// -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. -// -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. -// -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. -// -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateByteMatchSet -func (c *WAFRegional) UpdateByteMatchSet(input *waf.UpdateByteMatchSetInput) (*waf.UpdateByteMatchSetOutput, error) { - req, out := c.UpdateByteMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListByteMatchSets +func (c *WAFRegional) ListByteMatchSets(input *waf.ListByteMatchSetsInput) (*waf.ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) return out, req.Send() } -// UpdateByteMatchSetWithContext is the same as UpdateByteMatchSet with the addition of +// ListByteMatchSetsWithContext is the same as ListByteMatchSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateByteMatchSet for details on how to use this API operation. +// See ListByteMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateByteMatchSetWithContext(ctx aws.Context, input *waf.UpdateByteMatchSetInput, opts ...request.Option) (*waf.UpdateByteMatchSetOutput, error) { - req, out := c.UpdateByteMatchSetRequest(input) +func (c *WAFRegional) ListByteMatchSetsWithContext(ctx aws.Context, input *waf.ListByteMatchSetsInput, opts ...request.Option) (*waf.ListByteMatchSetsOutput, error) { + req, out := c.ListByteMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opUpdateIPSet = "UpdateIPSet" +const opListGeoMatchSets = "ListGeoMatchSets" -// UpdateIPSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateIPSet operation. The "output" return +// ListGeoMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListGeoMatchSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateIPSet for more information on using the UpdateIPSet +// See ListGeoMatchSets for more information on using the ListGeoMatchSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateIPSetRequest method. -// req, resp := client.UpdateIPSetRequest(params) +// // Example sending a request using the ListGeoMatchSetsRequest method. +// req, resp := client.ListGeoMatchSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateIPSet -func (c *WAFRegional) UpdateIPSetRequest(input *waf.UpdateIPSetInput) (req *request.Request, output *waf.UpdateIPSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListGeoMatchSets +func (c *WAFRegional) ListGeoMatchSetsRequest(input *waf.ListGeoMatchSetsInput) (req *request.Request, output *waf.ListGeoMatchSetsOutput) { op := &request.Operation{ - Name: opUpdateIPSet, + Name: opListGeoMatchSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateIPSetInput{} + input = &waf.ListGeoMatchSetsInput{} } - output = &waf.UpdateIPSetOutput{} + output = &waf.ListGeoMatchSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateIPSet API operation for AWS WAF Regional. -// -// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor -// object, you specify the following values: -// -// * Whether to insert or delete the object from the array. If you want to -// change an IPSetDescriptor object, you delete the existing object and add -// a new one. -// -// * The IP address version, IPv4 or IPv6. -// -// * The IP address in CIDR notation, for example, 192.0.2.0/24 (for the -// range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 -// (for the individual IP address 192.0.2.44). -// -// AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, -// /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, -// see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). -// -// IPv6 addresses can be represented using any of the following formats: -// -// * 1111:0000:0000:0000:0000:0000:0000:0111/128 -// -// * 1111:0:0:0:0:0:0:0111/128 -// -// * 1111::0111/128 -// -// * 1111::111/128 +// ListGeoMatchSets API operation for AWS WAF Regional. // -// You use an IPSet to specify which web requests you want to allow or block -// based on the IP addresses that the requests originated from. 
For example, -// if you're receiving a lot of requests from one or a small number of IP addresses -// and you want to block the requests, you can create an IPSet that specifies -// those IP addresses, and then configure AWS WAF to block the requests. -// -// To create and configure an IPSet, perform the following steps: -// -// Submit a CreateIPSet request. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateIPSet request. -// -// Submit an UpdateIPSet request to specify the IP addresses that you want AWS -// WAF to watch for. -// -// When you update an IPSet, you specify the IP addresses that you want to add -// and/or the IP addresses that you want to delete. If you want to change an -// IP address, you delete the existing IP address and add the new one. -// -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of GeoMatchSetSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateIPSet for usage and error information. +// API operation ListGeoMatchSets for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -4772,206 +4847,82 @@ func (c *WAFRegional) UpdateIPSetRequest(input *waf.UpdateIPSetInput) (req *requ // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. 
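The removed UpdateIPSet documentation above outlines the change-token workflow: fetch a token with GetChangeToken, then pass it to the update call. A minimal sketch of that flow, reusing the client and imports from the earlier sketch; the IPSet ID is a placeholder:

// Insert one IPv4 CIDR range into an existing IPSet via the
// change-token workflow described above.
func insertIPv4Range(client *wafregional.WAFRegional) error {
	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.UpdateIPSet(&waf.UpdateIPSetInput{
		ChangeToken: token.ChangeToken,
		IPSetId:     aws.String("example-ipset-id"), // placeholder ID
		Updates: []*waf.IPSetUpdate{{
			Action: aws.String("INSERT"),
			IPSetDescriptor: &waf.IPSetDescriptor{
				Type:  aws.String("IPV4"),
				Value: aws.String("192.0.2.0/24"), // one of the CIDR sizes WAF accepts for IPv4
			},
		}},
	})
	return err
}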
-// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: -// -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. -// -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. -// -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. -// -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: -// -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. -// -// * You tried to delete a Rule that is still referenced by a WebACL. -// -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateIPSet -func (c *WAFRegional) UpdateIPSet(input *waf.UpdateIPSetInput) (*waf.UpdateIPSetOutput, error) { - req, out := c.UpdateIPSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListGeoMatchSets +func (c *WAFRegional) ListGeoMatchSets(input *waf.ListGeoMatchSetsInput) (*waf.ListGeoMatchSetsOutput, error) { + req, out := c.ListGeoMatchSetsRequest(input) return out, req.Send() } -// UpdateIPSetWithContext is the same as UpdateIPSet with the addition of +// ListGeoMatchSetsWithContext is the same as ListGeoMatchSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateIPSet for details on how to use this API operation. +// See ListGeoMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateIPSetWithContext(ctx aws.Context, input *waf.UpdateIPSetInput, opts ...request.Option) (*waf.UpdateIPSetOutput, error) { - req, out := c.UpdateIPSetRequest(input) +func (c *WAFRegional) ListGeoMatchSetsWithContext(ctx aws.Context, input *waf.ListGeoMatchSetsInput, opts ...request.Option) (*waf.ListGeoMatchSetsOutput, error) { + req, out := c.ListGeoMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opUpdateRateBasedRule = "UpdateRateBasedRule" +const opListIPSets = "ListIPSets" -// UpdateRateBasedRuleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRateBasedRule operation. The "output" return +// ListIPSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListIPSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateRateBasedRule for more information on using the UpdateRateBasedRule +// See ListIPSets for more information on using the ListIPSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateRateBasedRuleRequest method. -// req, resp := client.UpdateRateBasedRuleRequest(params) +// // Example sending a request using the ListIPSetsRequest method. +// req, resp := client.ListIPSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRateBasedRule -func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleInput) (req *request.Request, output *waf.UpdateRateBasedRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListIPSets +func (c *WAFRegional) ListIPSetsRequest(input *waf.ListIPSetsInput) (req *request.Request, output *waf.ListIPSetsOutput) { op := &request.Operation{ - Name: opUpdateRateBasedRule, + Name: opListIPSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateRateBasedRuleInput{} + input = &waf.ListIPSetsInput{} } - output = &waf.UpdateRateBasedRuleOutput{} + output = &waf.ListIPSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateRateBasedRule API operation for AWS WAF Regional. -// -// Inserts or deletes Predicate objects in a rule and updates the RateLimit -// in the rule. -// -// Each Predicate object identifies a predicate, such as a ByteMatchSet or an -// IPSet, that specifies the web requests that you want to block or count. The -// RateLimit specifies the number of requests every five minutes that triggers -// the rule. -// -// If you add more than one predicate to a RateBasedRule, a request must match -// all the predicates and exceed the RateLimit to be counted or blocked. For -// example, suppose you add the following to a RateBasedRule: -// -// * An IPSet that matches the IP address 192.0.2.44/32 -// -// * A ByteMatchSet that matches BadBot in the User-Agent header -// -// Further, you specify a RateLimit of 15,000. -// -// You then add the RateBasedRule to a WebACL and specify that you want to block -// requests that satisfy the rule. For a request to be blocked, it must come -// from the IP address 192.0.2.44 and the User-Agent header in the request must -// contain the value BadBot. Further, requests that match these two conditions -// much be received at a rate of more than 15,000 every five minutes. If the -// rate drops below this limit, AWS WAF no longer blocks the requests. 
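The removed UpdateRateBasedRule documentation above walks through attaching predicates and a RateLimit of 15,000 requests per five minutes. A sketch of that update under the same change-token workflow, reusing the client and imports from the earlier sketches; both IDs are placeholders:

// Attach an IPSet predicate to a RateBasedRule and set its RateLimit,
// mirroring the first example in the documentation above.
func rateLimitByIPSet(client *wafregional.WAFRegional) error {
	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		RuleId:      aws.String("example-rule-id"), // placeholder ID
		RateLimit:   aws.Int64(15000),
		Updates: []*waf.RuleUpdate{{
			Action: aws.String("INSERT"),
			Predicate: &waf.Predicate{
				DataId:  aws.String("example-ipset-id"), // placeholder IPSet ID
				Negated: aws.Bool(false),
				Type:    aws.String("IPMatch"),
			},
		}},
	})
	return err
}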
-// -// As a second example, suppose you want to limit requests to a particular page -// on your site. To do this, you could add the following to a RateBasedRule: -// -// * A ByteMatchSet with FieldToMatch of URI -// -// * A PositionalConstraint of STARTS_WITH -// -// * A TargetString of login -// -// Further, you specify a RateLimit of 15,000. +// ListIPSets API operation for AWS WAF Regional. // -// By adding this RateBasedRule to a WebACL, you could limit requests to your -// login page without affecting the rest of your site. +// Returns an array of IPSetSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateRateBasedRule for usage and error information. +// API operation ListIPSets for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -4980,201 +4931,166 @@ func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleI // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListIPSets +func (c *WAFRegional) ListIPSets(input *waf.ListIPSetsInput) (*waf.ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + return out, req.Send() +} + +// ListIPSetsWithContext is the same as ListIPSets with the addition of +// the ability to pass a context and additional request options. // -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. +// See ListIPSets for details on how to use this API operation. // -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListIPSetsWithContext(ctx aws.Context, input *waf.ListIPSetsInput, opts ...request.Option) (*waf.ListIPSetsOutput, error) { + req, out := c.ListIPSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRateBasedRules = "ListRateBasedRules" + +// ListRateBasedRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRateBasedRules operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. 
+// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// See ListRateBasedRules for more information on using the ListRateBasedRules +// API call, and error handling. // -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// // Example sending a request using the ListRateBasedRulesRequest method. +// req, resp := client.ListRateBasedRulesRequest(params) // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRateBasedRules +func (c *WAFRegional) ListRateBasedRulesRequest(input *waf.ListRateBasedRulesInput) (req *request.Request, output *waf.ListRateBasedRulesOutput) { + op := &request.Operation{ + Name: opListRateBasedRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListRateBasedRulesInput{} + } + + output = &waf.ListRateBasedRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRateBasedRules API operation for AWS WAF Regional. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// Returns an array of RuleSummary objects. // -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. 
For example: +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListRateBasedRules for usage and error information. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRateBasedRule -func (c *WAFRegional) UpdateRateBasedRule(input *waf.UpdateRateBasedRuleInput) (*waf.UpdateRateBasedRuleOutput, error) { - req, out := c.UpdateRateBasedRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRateBasedRules +func (c *WAFRegional) ListRateBasedRules(input *waf.ListRateBasedRulesInput) (*waf.ListRateBasedRulesOutput, error) { + req, out := c.ListRateBasedRulesRequest(input) return out, req.Send() } -// UpdateRateBasedRuleWithContext is the same as UpdateRateBasedRule with the addition of +// ListRateBasedRulesWithContext is the same as ListRateBasedRules with the addition of // the ability to pass a context and additional request options. // -// See UpdateRateBasedRule for details on how to use this API operation. +// See ListRateBasedRules for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateRateBasedRuleWithContext(ctx aws.Context, input *waf.UpdateRateBasedRuleInput, opts ...request.Option) (*waf.UpdateRateBasedRuleOutput, error) { - req, out := c.UpdateRateBasedRuleRequest(input) +func (c *WAFRegional) ListRateBasedRulesWithContext(ctx aws.Context, input *waf.ListRateBasedRulesInput, opts ...request.Option) (*waf.ListRateBasedRulesOutput, error) { + req, out := c.ListRateBasedRulesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateRule = "UpdateRule" +const opListRegexMatchSets = "ListRegexMatchSets" -// UpdateRuleRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRule operation. The "output" return +// ListRegexMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListRegexMatchSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateRule for more information on using the UpdateRule +// See ListRegexMatchSets for more information on using the ListRegexMatchSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateRuleRequest method. -// req, resp := client.UpdateRuleRequest(params) +// // Example sending a request using the ListRegexMatchSetsRequest method. +// req, resp := client.ListRegexMatchSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRule -func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *request.Request, output *waf.UpdateRuleOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRegexMatchSets +func (c *WAFRegional) ListRegexMatchSetsRequest(input *waf.ListRegexMatchSetsInput) (req *request.Request, output *waf.ListRegexMatchSetsOutput) { op := &request.Operation{ - Name: opUpdateRule, + Name: opListRegexMatchSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateRuleInput{} + input = &waf.ListRegexMatchSetsInput{} } - output = &waf.UpdateRuleOutput{} + output = &waf.ListRegexMatchSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateRule API operation for AWS WAF Regional. -// -// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies -// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests -// that you want to allow, block, or count. If you add more than one predicate -// to a Rule, a request must match all of the specifications to be allowed, -// blocked, or counted. For example, suppose you add the following to a Rule: -// -// * A ByteMatchSet that matches the value BadBot in the User-Agent header -// -// * An IPSet that matches the IP address 192.0.2.44 -// -// You then add the Rule to a WebACL and specify that you want to block requests -// that satisfy the Rule. For a request to be blocked, the User-Agent header -// in the request must contain the value BadBotand the request must originate -// from the IP address 192.0.2.44. -// -// To create and configure a Rule, perform the following steps: -// -// Create and update the predicates that you want to include in the Rule. -// -// Create the Rule. See CreateRule. +// ListRegexMatchSets API operation for AWS WAF Regional. // -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateRule request. -// -// Submit an UpdateRule request to add predicates to the Rule. -// -// Create and update a WebACL that contains the Rule. See CreateWebACL. -// -// If you want to replace one ByteMatchSet or IPSet with another, you delete -// the existing one and add the new one. -// -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of RegexMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateRule for usage and error information. +// API operation ListRegexMatchSets for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -5183,207 +5099,82 @@ func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *reques // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. -// -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. -// -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. -// -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. -// -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. -// -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. -// -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. -// -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: -// -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. -// -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. -// -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. 
-// -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. -// -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: -// -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. -// -// * You tried to delete a Rule that is still referenced by a WebACL. -// -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRule -func (c *WAFRegional) UpdateRule(input *waf.UpdateRuleInput) (*waf.UpdateRuleOutput, error) { - req, out := c.UpdateRuleRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRegexMatchSets +func (c *WAFRegional) ListRegexMatchSets(input *waf.ListRegexMatchSetsInput) (*waf.ListRegexMatchSetsOutput, error) { + req, out := c.ListRegexMatchSetsRequest(input) return out, req.Send() } -// UpdateRuleWithContext is the same as UpdateRule with the addition of +// ListRegexMatchSetsWithContext is the same as ListRegexMatchSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateRule for details on how to use this API operation. +// See ListRegexMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateRuleWithContext(ctx aws.Context, input *waf.UpdateRuleInput, opts ...request.Option) (*waf.UpdateRuleOutput, error) { - req, out := c.UpdateRuleRequest(input) +func (c *WAFRegional) ListRegexMatchSetsWithContext(ctx aws.Context, input *waf.ListRegexMatchSetsInput, opts ...request.Option) (*waf.ListRegexMatchSetsOutput, error) { + req, out := c.ListRegexMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet" +const opListRegexPatternSets = "ListRegexPatternSets" -// UpdateSizeConstraintSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSizeConstraintSet operation. The "output" return +// ListRegexPatternSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListRegexPatternSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateSizeConstraintSet for more information on using the UpdateSizeConstraintSet +// See ListRegexPatternSets for more information on using the ListRegexPatternSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSizeConstraintSetRequest method. -// req, resp := client.UpdateSizeConstraintSetRequest(params) +// // Example sending a request using the ListRegexPatternSetsRequest method. +// req, resp := client.ListRegexPatternSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSizeConstraintSet -func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstraintSetInput) (req *request.Request, output *waf.UpdateSizeConstraintSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRegexPatternSets +func (c *WAFRegional) ListRegexPatternSetsRequest(input *waf.ListRegexPatternSetsInput) (req *request.Request, output *waf.ListRegexPatternSetsOutput) { op := &request.Operation{ - Name: opUpdateSizeConstraintSet, + Name: opListRegexPatternSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateSizeConstraintSetInput{} + input = &waf.ListRegexPatternSetsInput{} } - output = &waf.UpdateSizeConstraintSetOutput{} + output = &waf.ListRegexPatternSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateSizeConstraintSet API operation for AWS WAF Regional. -// -// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. -// For each SizeConstraint object, you specify the following values: -// -// * Whether to insert or delete the object from the array. If you want to -// change a SizeConstraintSetUpdate object, you delete the existing object -// and add a new one. -// -// * The part of a web request that you want AWS WAF to evaluate, such as -// the length of a query string or the length of the User-Agent header. -// -// * Whether to perform any transformations on the request, such as converting -// it to lowercase, before checking its length. Note that transformations -// of the request body are not supported because the AWS resource forwards -// only the first 8192 bytes of your request to AWS WAF. -// -// * A ComparisonOperator used for evaluating the selected part of the request -// against the specified Size, such as equals, greater than, less than, and -// so on. -// -// * The length, in bytes, that you want AWS WAF to watch for in selected -// part of the request. The length is computed after applying the transformation. -// -// For example, you can add a SizeConstraintSetUpdate object that matches web -// requests in which the length of the User-Agent header is greater than 100 -// bytes. You can then configure AWS WAF to block those requests. +// ListRegexPatternSets API operation for AWS WAF Regional. // -// To create and configure a SizeConstraintSet, perform the following steps: -// -// Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSizeConstraintSet request. -// -// Submit an UpdateSizeConstraintSet request to specify the part of the request -// that you want AWS WAF to inspect (for example, the header or the URI) and -// the value that you want AWS WAF to watch for. -// -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). 
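// Editor's sketch (illustrative, not generated code): one way to inject
// custom logic into the request lifecycle described above is to modify
// the returned Request before calling Send. The header name below is a
// hypothetical example; client is assumed to be an existing *WAFRegional.
//
//    req, resp := client.ListRegexPatternSetsRequest(&waf.ListRegexPatternSetsInput{})
//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo") // hypothetical header
//    if err := req.Send(); err == nil {
//        fmt.Println(resp) // resp is valid only after Send succeeds
//    }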
+// Returns an array of RegexPatternSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateSizeConstraintSet for usage and error information. +// API operation ListRegexPatternSets for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -5392,194 +5183,167 @@ func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstr // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRegexPatternSets +func (c *WAFRegional) ListRegexPatternSets(input *waf.ListRegexPatternSetsInput) (*waf.ListRegexPatternSetsOutput, error) { + req, out := c.ListRegexPatternSetsRequest(input) + return out, req.Send() +} + +// ListRegexPatternSetsWithContext is the same as ListRegexPatternSets with the addition of +// the ability to pass a context and additional request options. // -// * You specified an invalid parameter name. +// See ListRegexPatternSets for details on how to use this API operation. // -// * You specified an invalid value. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListRegexPatternSetsWithContext(ctx aws.Context, input *waf.ListRegexPatternSetsInput, opts ...request.Option) (*waf.ListRegexPatternSetsOutput, error) { + req, out := c.ListRegexPatternSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListResourcesForWebACL = "ListResourcesForWebACL" + +// ListResourcesForWebACLRequest generates a "aws/request.Request" representing the +// client's request for the ListResourcesForWebACL operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. +// See ListResourcesForWebACL for more information on using the ListResourcesForWebACL +// API call, and error handling. // -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// // Example sending a request using the ListResourcesForWebACLRequest method. +// req, resp := client.ListResourcesForWebACLRequest(params) // -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACL +func (c *WAFRegional) ListResourcesForWebACLRequest(input *ListResourcesForWebACLInput) (req *request.Request, output *ListResourcesForWebACLOutput) { + op := &request.Operation{ + Name: opListResourcesForWebACL, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListResourcesForWebACLInput{} + } + + output = &ListResourcesForWebACLOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResourcesForWebACL API operation for AWS WAF Regional. // -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// Returns an array of resources associated with the specified web ACL. // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListResourcesForWebACL for usage and error information. // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. 
+// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // // * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" // The operation failed because the referenced object doesn't exist. // -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: -// -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. -// -// * You tried to delete a Rule that is still referenced by a WebACL. -// -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. -// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSizeConstraintSet -func (c *WAFRegional) UpdateSizeConstraintSet(input *waf.UpdateSizeConstraintSetInput) (*waf.UpdateSizeConstraintSetOutput, error) { - req, out := c.UpdateSizeConstraintSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACL +func (c *WAFRegional) ListResourcesForWebACL(input *ListResourcesForWebACLInput) (*ListResourcesForWebACLOutput, error) { + req, out := c.ListResourcesForWebACLRequest(input) return out, req.Send() } -// UpdateSizeConstraintSetWithContext is the same as UpdateSizeConstraintSet with the addition of +// ListResourcesForWebACLWithContext is the same as ListResourcesForWebACL with the addition of // the ability to pass a context and additional request options. // -// See UpdateSizeConstraintSet for details on how to use this API operation. +// See ListResourcesForWebACL for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateSizeConstraintSetWithContext(ctx aws.Context, input *waf.UpdateSizeConstraintSetInput, opts ...request.Option) (*waf.UpdateSizeConstraintSetOutput, error) { - req, out := c.UpdateSizeConstraintSetRequest(input) +func (c *WAFRegional) ListResourcesForWebACLWithContext(ctx aws.Context, input *ListResourcesForWebACLInput, opts ...request.Option) (*ListResourcesForWebACLOutput, error) { + req, out := c.ListResourcesForWebACLRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet" +const opListRules = "ListRules" -// UpdateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSqlInjectionMatchSet operation. The "output" return +// ListRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRules operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
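// Editor's sketch (illustrative, not generated code): calling the
// regional-only ListResourcesForWebACL operation defined above and
// inspecting a service error with awserr's Code and Message methods,
// as the docs describe. Note that its input/output types live in this
// package rather than in the shared waf package; the WebACLId below is
// a placeholder and client is assumed to be an existing *WAFRegional.
//
//    out, err := client.ListResourcesForWebACL(&wafregional.ListResourcesForWebACLInput{
//        WebACLId: aws.String("00000000-0000-0000-0000-000000000000"), // placeholder
//    })
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            fmt.Println(aerr.Code(), aerr.Message()) // e.g. "WAFNonexistentItemException"
//        }
//        return
//    }
//    fmt.Println(out.ResourceArns)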
// -// See UpdateSqlInjectionMatchSet for more information on using the UpdateSqlInjectionMatchSet +// See ListRules for more information on using the ListRules // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSqlInjectionMatchSetRequest method. -// req, resp := client.UpdateSqlInjectionMatchSetRequest(params) +// // Example sending a request using the ListRulesRequest method. +// req, resp := client.ListRulesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSqlInjectionMatchSet -func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInjectionMatchSetInput) (req *request.Request, output *waf.UpdateSqlInjectionMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRules +func (c *WAFRegional) ListRulesRequest(input *waf.ListRulesInput) (req *request.Request, output *waf.ListRulesOutput) { op := &request.Operation{ - Name: opUpdateSqlInjectionMatchSet, + Name: opListRules, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateSqlInjectionMatchSetInput{} + input = &waf.ListRulesInput{} } - output = &waf.UpdateSqlInjectionMatchSetOutput{} + output = &waf.ListRulesOutput{} req = c.newRequest(op, input, output) return } -// UpdateSqlInjectionMatchSet API operation for AWS WAF Regional. -// -// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. -// For each SqlInjectionMatchTuple object, you specify the following values: -// -// * Action: Whether to insert the object into or delete the object from -// the array. To change a SqlInjectionMatchTuple, you delete the existing -// object and add a new one. -// -// * FieldToMatch: The part of web requests that you want AWS WAF to inspect -// and, if you want AWS WAF to inspect a header, the name of the header. -// -// * TextTransformation: Which text transformation, if any, to perform on -// the web request before inspecting the request for snippets of malicious -// SQL code. -// -// You use SqlInjectionMatchSet objects to specify which CloudFront requests -// you want to allow, block, or count. For example, if you're receiving requests -// that contain snippets of SQL code in the query string and you want to block -// the requests, you can create a SqlInjectionMatchSet with the applicable settings, -// and then configure AWS WAF to block the requests. -// -// To create and configure a SqlInjectionMatchSet, perform the following steps: -// -// Submit a CreateSqlInjectionMatchSet request. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateIPSet request. -// -// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web -// requests that you want AWS WAF to inspect for snippets of SQL code. +// ListRules API operation for AWS WAF Regional. // -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateSqlInjectionMatchSet for usage and error information. +// API operation ListRules for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -5590,212 +5354,166 @@ func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInje // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: -// -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. -// -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. -// -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. -// -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. -// -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. -// -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. -// -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: -// -// * You specified an invalid parameter name. -// -// * You specified an invalid value. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListRules +func (c *WAFRegional) ListRules(input *waf.ListRulesInput) (*waf.ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + return out, req.Send() +} + +// ListRulesWithContext is the same as ListRules with the addition of +// the ability to pass a context and additional request options. // -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. +// See ListRules for details on how to use this API operation. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListRulesWithContext(ctx aws.Context, input *waf.ListRulesInput, opts ...request.Option) (*waf.ListRulesOutput, error) { + req, out := c.ListRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListSizeConstraintSets = "ListSizeConstraintSets" + +// ListSizeConstraintSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSizeConstraintSets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. +// See ListSizeConstraintSets for more information on using the ListSizeConstraintSets +// API call, and error handling. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// // Example sending a request using the ListSizeConstraintSetsRequest method. +// req, resp := client.ListSizeConstraintSetsRequest(params) // -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSizeConstraintSets +func (c *WAFRegional) ListSizeConstraintSetsRequest(input *waf.ListSizeConstraintSetsInput) (req *request.Request, output *waf.ListSizeConstraintSetsOutput) { + op := &request.Operation{ + Name: opListSizeConstraintSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListSizeConstraintSetsInput{} + } + + output = &waf.ListSizeConstraintSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSizeConstraintSets API operation for AWS WAF Regional. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// Returns an array of SizeConstraintSetSummary objects. // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListSizeConstraintSets for usage and error information. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" // The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. 
For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. +// by using an invalid account identifier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSqlInjectionMatchSet -func (c *WAFRegional) UpdateSqlInjectionMatchSet(input *waf.UpdateSqlInjectionMatchSetInput) (*waf.UpdateSqlInjectionMatchSetOutput, error) { - req, out := c.UpdateSqlInjectionMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSizeConstraintSets +func (c *WAFRegional) ListSizeConstraintSets(input *waf.ListSizeConstraintSetsInput) (*waf.ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) return out, req.Send() } -// UpdateSqlInjectionMatchSetWithContext is the same as UpdateSqlInjectionMatchSet with the addition of +// ListSizeConstraintSetsWithContext is the same as ListSizeConstraintSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateSqlInjectionMatchSet for details on how to use this API operation. +// See ListSizeConstraintSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.UpdateSqlInjectionMatchSetInput, opts ...request.Option) (*waf.UpdateSqlInjectionMatchSetOutput, error) { - req, out := c.UpdateSqlInjectionMatchSetRequest(input) +func (c *WAFRegional) ListSizeConstraintSetsWithContext(ctx aws.Context, input *waf.ListSizeConstraintSetsInput, opts ...request.Option) (*waf.ListSizeConstraintSetsOutput, error) { + req, out := c.ListSizeConstraintSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateWebACL = "UpdateWebACL" +const opListSqlInjectionMatchSets = "ListSqlInjectionMatchSets" -// UpdateWebACLRequest generates a "aws/request.Request" representing the -// client's request for the UpdateWebACL operation. The "output" return +// ListSqlInjectionMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListSqlInjectionMatchSets operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateWebACL for more information on using the UpdateWebACL +// See ListSqlInjectionMatchSets for more information on using the ListSqlInjectionMatchSets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateWebACLRequest method. -// req, resp := client.UpdateWebACLRequest(params) +// // Example sending a request using the ListSqlInjectionMatchSetsRequest method. 
+// req, resp := client.ListSqlInjectionMatchSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateWebACL -func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *request.Request, output *waf.UpdateWebACLOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSqlInjectionMatchSets +func (c *WAFRegional) ListSqlInjectionMatchSetsRequest(input *waf.ListSqlInjectionMatchSetsInput) (req *request.Request, output *waf.ListSqlInjectionMatchSetsOutput) { op := &request.Operation{ - Name: opUpdateWebACL, + Name: opListSqlInjectionMatchSets, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateWebACLInput{} + input = &waf.ListSqlInjectionMatchSetsInput{} } - output = &waf.UpdateWebACLOutput{} + output = &waf.ListSqlInjectionMatchSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateWebACL API operation for AWS WAF Regional. -// -// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies -// web requests that you want to allow, block, or count. When you update a WebACL, -// you specify the following values: -// -// * A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs -// the default action if a request doesn't match the criteria in any of the -// Rules in a WebACL. -// -// * The Rules that you want to add and/or delete. If you want to replace -// one Rule with another, you delete the existing Rule and add the new one. -// -// * For each Rule, whether you want AWS WAF to allow requests, block requests, -// or count requests that match the conditions in the Rule. -// -// * The order in which you want AWS WAF to evaluate the Rules in a WebACL. -// If you add more than one Rule to a WebACL, AWS WAF evaluates each request -// against the Rules in order based on the value of Priority. (The Rule that -// has the lowest value for Priority is evaluated first.) When a web request -// matches all of the predicates (such as ByteMatchSets and IPSets) in a -// Rule, AWS WAF immediately takes the corresponding action, allow or block, -// and doesn't evaluate the request against the remaining Rules in the WebACL, -// if any. -// -// To create and configure a WebACL, perform the following steps: -// -// Create and update the predicates that you want to include in Rules. For more -// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, -// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. -// -// Create and update the Rules that you want to include in the WebACL. For more -// information, see CreateRule and UpdateRule. -// -// Create a WebACL. See CreateWebACL. -// -// Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateWebACL request. -// -// Submit an UpdateWebACL request to specify the Rules that you want to include -// in the WebACL, to specify the default action, and to associate the WebACL -// with a CloudFront distribution. -// -// Be aware that if you try to add a RATE_BASED rule to a web ACL without setting -// the rule type when first creating the rule, the UpdateWebACL request will -// fail because the request tries to add a REGULAR rule (the default rule type) -// with the specified ID, which does not exist. +// ListSqlInjectionMatchSets API operation for AWS WAF Regional. 
// -// For more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// Returns an array of SqlInjectionMatchSet objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateWebACL for usage and error information. +// API operation ListSqlInjectionMatchSets for usage and error information. // // Returned Error Codes: -// * ErrCodeWAFStaleDataException "WAFStaleDataException" -// The operation failed because you tried to create, update, or delete an object -// by using a change token that has already been used. -// // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" // The operation failed because of a system problem, even though the request // was valid. Retry your request. @@ -5804,184 +5522,274 @@ func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *re // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. // -// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" -// The operation failed because there was nothing to do. For example: +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListSqlInjectionMatchSets +func (c *WAFRegional) ListSqlInjectionMatchSets(input *waf.ListSqlInjectionMatchSetsInput) (*waf.ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + return out, req.Send() +} + +// ListSqlInjectionMatchSetsWithContext is the same as ListSqlInjectionMatchSets with the addition of +// the ability to pass a context and additional request options. // -// * You tried to remove a Rule from a WebACL, but the Rule isn't in the -// specified WebACL. +// See ListSqlInjectionMatchSets for details on how to use this API operation. // -// * You tried to remove an IP address from an IPSet, but the IP address -// isn't in the specified IPSet. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListSqlInjectionMatchSetsWithContext(ctx aws.Context, input *waf.ListSqlInjectionMatchSetsInput, opts ...request.Option) (*waf.ListSqlInjectionMatchSetsOutput, error) { + req, out := c.ListSqlInjectionMatchSetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListWebACLs = "ListWebACLs" + +// ListWebACLsRequest generates a "aws/request.Request" representing the +// client's request for the ListWebACLs operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple -// isn't in the specified WebACL. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add a Rule to a WebACL, but the Rule already exists in -// the specified WebACL. 
+// See ListWebACLs for more information on using the ListWebACLs +// API call, and error handling. // -// * You tried to add an IP address to an IPSet, but the IP address already -// exists in the specified IPSet. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple -// already exists in the specified WebACL. // -// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" -// The operation failed because AWS WAF didn't recognize a parameter in the -// request. For example: +// // Example sending a request using the ListWebACLsRequest method. +// req, resp := client.ListWebACLsRequest(params) // -// * You specified an invalid parameter name. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You specified an invalid value. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListWebACLs +func (c *WAFRegional) ListWebACLsRequest(input *waf.ListWebACLsInput) (req *request.Request, output *waf.ListWebACLsOutput) { + op := &request.Operation{ + Name: opListWebACLs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListWebACLsInput{} + } + + output = &waf.ListWebACLsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWebACLs API operation for AWS WAF Regional. // -// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) -// using an action other than INSERT or DELETE. +// Returns an array of WebACLSummary objects in the response. // -// * You tried to create a WebACL with a DefaultActionType other than ALLOW, -// BLOCK, or COUNT. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * You tried to create a RateBasedRule with a RateKey value other than -// IP. +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListWebACLs for usage and error information. // -// * You tried to update a WebACL with a WafActionType other than ALLOW, -// BLOCK, or COUNT. +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. // -// * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. // -// * You tried to update a ByteMatchSet with a Field of HEADER but no value -// for Data. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListWebACLs +func (c *WAFRegional) ListWebACLs(input *waf.ListWebACLsInput) (*waf.ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) + return out, req.Send() +} + +// ListWebACLsWithContext is the same as ListWebACLs with the addition of +// the ability to pass a context and additional request options. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// See ListWebACLs for details on how to use this API operation. 
// -// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" -// The operation failed because you tried to add an object to or delete an object -// from another object that doesn't exist. For example: +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) ListWebACLsWithContext(ctx aws.Context, input *waf.ListWebACLsInput, opts ...request.Option) (*waf.ListWebACLsOutput, error) { + req, out := c.ListWebACLsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListXssMatchSets = "ListXssMatchSets" + +// ListXssMatchSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListXssMatchSets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. // -// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't -// exist. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule -// that doesn't exist. +// See ListXssMatchSets for more information on using the ListXssMatchSets +// API call, and error handling. // -// * You tried to add an IP address to or delete an IP address from an IPSet -// that doesn't exist. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from -// a ByteMatchSet that doesn't exist. // -// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" -// The operation failed because the referenced object doesn't exist. +// // Example sending a request using the ListXssMatchSetsRequest method. +// req, resp := client.ListXssMatchSetsRequest(params) // -// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" -// The operation failed because you tried to delete an object that is still -// in use. For example: +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListXssMatchSets +func (c *WAFRegional) ListXssMatchSetsRequest(input *waf.ListXssMatchSetsInput) (req *request.Request, output *waf.ListXssMatchSetsOutput) { + op := &request.Operation{ + Name: opListXssMatchSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.ListXssMatchSetsInput{} + } + + output = &waf.ListXssMatchSetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListXssMatchSets API operation for AWS WAF Regional. // -// * You tried to delete a Rule that is still referenced by a WebACL. +// Returns an array of XssMatchSet objects. // -// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" -// The operation exceeds a resource limit, for example, the maximum number of -// WebACL objects that you can create for an AWS account. 
For more information, -// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) -// in the AWS WAF Developer Guide. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateWebACL -func (c *WAFRegional) UpdateWebACL(input *waf.UpdateWebACLInput) (*waf.UpdateWebACLOutput, error) { - req, out := c.UpdateWebACLRequest(input) +// See the AWS API reference guide for AWS WAF Regional's +// API operation ListXssMatchSets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListXssMatchSets +func (c *WAFRegional) ListXssMatchSets(input *waf.ListXssMatchSetsInput) (*waf.ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) return out, req.Send() } -// UpdateWebACLWithContext is the same as UpdateWebACL with the addition of +// ListXssMatchSetsWithContext is the same as ListXssMatchSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateWebACL for details on how to use this API operation. +// See ListXssMatchSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFRegional) UpdateWebACLWithContext(ctx aws.Context, input *waf.UpdateWebACLInput, opts ...request.Option) (*waf.UpdateWebACLOutput, error) { - req, out := c.UpdateWebACLRequest(input) +func (c *WAFRegional) ListXssMatchSetsWithContext(ctx aws.Context, input *waf.ListXssMatchSetsInput, opts ...request.Option) (*waf.ListXssMatchSetsOutput, error) { + req, out := c.ListXssMatchSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateXssMatchSet = "UpdateXssMatchSet" +const opUpdateByteMatchSet = "UpdateByteMatchSet" -// UpdateXssMatchSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateXssMatchSet operation. The "output" return +// UpdateByteMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateByteMatchSet operation. The "output" return // value will be populated with the request's response once the request complets // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateXssMatchSet for more information on using the UpdateXssMatchSet +// See UpdateByteMatchSet for more information on using the UpdateByteMatchSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
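// Editor's sketch (illustrative, not generated code): the *WithContext
// variants above accept a context, so a deadline can bound the call;
// per the docs the context must be non-nil. client is assumed to be an
// existing *WAFRegional, with "context" and "time" imported.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    out, err := client.ListXssMatchSetsWithContext(ctx, &waf.ListXssMatchSetsInput{})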
// // -// // Example sending a request using the UpdateXssMatchSetRequest method. -// req, resp := client.UpdateXssMatchSetRequest(params) +// // Example sending a request using the UpdateByteMatchSetRequest method. +// req, resp := client.UpdateByteMatchSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateXssMatchSet -func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput) (req *request.Request, output *waf.UpdateXssMatchSetOutput) { +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateByteMatchSet +func (c *WAFRegional) UpdateByteMatchSetRequest(input *waf.UpdateByteMatchSetInput) (req *request.Request, output *waf.UpdateByteMatchSetOutput) { op := &request.Operation{ - Name: opUpdateXssMatchSet, + Name: opUpdateByteMatchSet, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &waf.UpdateXssMatchSetInput{} + input = &waf.UpdateByteMatchSetInput{} } - output = &waf.UpdateXssMatchSetOutput{} + output = &waf.UpdateByteMatchSetOutput{} req = c.newRequest(op, input, output) return } -// UpdateXssMatchSet API operation for AWS WAF Regional. +// UpdateByteMatchSet API operation for AWS WAF Regional. // -// Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For -// each XssMatchTuple object, you specify the following values: +// Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For +// each ByteMatchTuple object, you specify the following values: // -// * Action: Whether to insert the object into or delete the object from -// the array. To change a XssMatchTuple, you delete the existing object and +// * Whether to insert or delete the object from the array. If you want to +// change a ByteMatchSetUpdate object, you delete the existing object and // add a new one. // -// * FieldToMatch: The part of web requests that you want AWS WAF to inspect -// and, if you want AWS WAF to inspect a header, the name of the header. +// * The part of a web request that you want AWS WAF to inspect, such as +// a query string or the value of the User-Agent header. // -// * TextTransformation: Which text transformation, if any, to perform on -// the web request before inspecting the request for cross-site scripting -// attacks. +// * The bytes (typically a string that corresponds with ASCII characters) +// that you want AWS WAF to look for. For more information, including how +// you specify the values for the AWS WAF API and the AWS CLI or SDKs, see +// TargetString in the ByteMatchTuple data type. // -// You use XssMatchSet objects to specify which CloudFront requests you want -// to allow, block, or count. For example, if you're receiving requests that -// contain cross-site scripting attacks in the request body and you want to -// block the requests, you can create an XssMatchSet with the applicable settings, -// and then configure AWS WAF to block the requests. +// * Where to look, such as at the beginning or the end of a query string. // -// To create and configure an XssMatchSet, perform the following steps: +// * Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. // -// Submit a CreateXssMatchSet request. +// For example, you can add a ByteMatchSetUpdate object that matches web requests +// in which User-Agent headers contain the string BadBot. 
You can then configure +// AWS WAF to block those requests. +// +// To create and configure a ByteMatchSet, perform the following steps: +// +// Create a ByteMatchSet. For more information, see CreateByteMatchSet. // // Use GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateIPSet request. +// parameter of an UpdateByteMatchSet request. // -// Submit an UpdateXssMatchSet request to specify the parts of web requests -// that you want AWS WAF to inspect for cross-site scripting attacks. +// Submit an UpdateByteMatchSet request to specify the part of the request that +// you want AWS WAF to inspect (for example, the header or the URI) and the +// value that you want AWS WAF to watch for. // // For more information about how to use the AWS WAF API to allow or block HTTP // requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). @@ -5991,7 +5799,7 @@ func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput // the error. // // See the AWS API reference guide for AWS WAF Regional's -// API operation UpdateXssMatchSet for usage and error information. +// API operation UpdateByteMatchSet for usage and error information. // // Returned Error Codes: // * ErrCodeWAFInternalErrorException "WAFInternalErrorException" @@ -6044,7 +5852,7 @@ func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than -// HEADER, QUERY_STRING, or URI. +// HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -6081,333 +5889,3066 @@ func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput // see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateXssMatchSet -func (c *WAFRegional) UpdateXssMatchSet(input *waf.UpdateXssMatchSetInput) (*waf.UpdateXssMatchSetOutput, error) { - req, out := c.UpdateXssMatchSetRequest(input) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateByteMatchSet +func (c *WAFRegional) UpdateByteMatchSet(input *waf.UpdateByteMatchSetInput) (*waf.UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) return out, req.Send() } -// UpdateXssMatchSetWithContext is the same as UpdateXssMatchSet with the addition of +// UpdateByteMatchSetWithContext is the same as UpdateByteMatchSet with the addition of // the ability to pass a context and additional request options. // -// See UpdateXssMatchSet for details on how to use this API operation. +// See UpdateByteMatchSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
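// Editor's sketch (illustrative, not generated code): the change-token
// workflow described above for UpdateByteMatchSet: fetch a token with
// GetChangeToken, then submit the update. The ByteMatchSetId is a
// placeholder, client is assumed to be an existing *WAFRegional, and
// the tuple mirrors the BadBot example from the docs.
//
//    token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
//    if err != nil {
//        return err
//    }
//    _, err = client.UpdateByteMatchSet(&waf.UpdateByteMatchSetInput{
//        ByteMatchSetId: aws.String("00000000-0000-0000-0000-000000000000"), // placeholder
//        ChangeToken:    token.ChangeToken,
//        Updates: []*waf.ByteMatchSetUpdate{{
//            Action: aws.String("INSERT"),
//            ByteMatchTuple: &waf.ByteMatchTuple{
//                FieldToMatch:         &waf.FieldToMatch{Type: aws.String("HEADER"), Data: aws.String("User-Agent")},
//                PositionalConstraint: aws.String("CONTAINS"),
//                TargetString:         []byte("BadBot"),
//                TextTransformation:   aws.String("NONE"),
//            },
//        }},
//    })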
-func (c *WAFRegional) UpdateXssMatchSetWithContext(ctx aws.Context, input *waf.UpdateXssMatchSetInput, opts ...request.Option) (*waf.UpdateXssMatchSetOutput, error) { - req, out := c.UpdateXssMatchSetRequest(input) +func (c *WAFRegional) UpdateByteMatchSetWithContext(ctx aws.Context, input *waf.UpdateByteMatchSetInput, opts ...request.Option) (*waf.UpdateByteMatchSetOutput, error) { + req, out := c.UpdateByteMatchSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACLRequest -type AssociateWebACLInput struct { - _ struct{} `type:"structure"` +const opUpdateGeoMatchSet = "UpdateGeoMatchSet" + +// UpdateGeoMatchSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGeoMatchSet operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGeoMatchSet for more information on using the UpdateGeoMatchSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGeoMatchSetRequest method. +// req, resp := client.UpdateGeoMatchSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateGeoMatchSet +func (c *WAFRegional) UpdateGeoMatchSetRequest(input *waf.UpdateGeoMatchSetInput) (req *request.Request, output *waf.UpdateGeoMatchSetOutput) { + op := &request.Operation{ + Name: opUpdateGeoMatchSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateGeoMatchSetInput{} + } + + output = &waf.UpdateGeoMatchSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateGeoMatchSet API operation for AWS WAF Regional. +// +// Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For each +// GeoMatchConstraint object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change an GeoMatchConstraint object, you delete the existing object and +// add a new one. +// +// * The Type. The only valid value for Type is Country. +// +// * The Value, which is a two character code for the country to add to the +// GeoMatchConstraint object. Valid codes are listed in GeoMatchConstraint$Value. +// +// To create and configure an GeoMatchSet, perform the following steps: +// +// Submit a CreateGeoMatchSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateGeoMatchSet request. +// +// Submit an UpdateGeoMatchSet request to specify the country that you want +// AWS WAF to watch for. +// +// When you update an GeoMatchSet, you specify the country that you want to +// add and/or the country that you want to delete. If you want to change a country, +// you delete the existing country and add the new one. 
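// Editor's sketch (illustrative, not generated code): the Updates
// payload for the UpdateGeoMatchSet steps just described; the
// ChangeToken is obtained with GetChangeToken exactly as in the
// UpdateByteMatchSet sketch earlier. Per the docs, the only valid Type
// is Country and Value is a two-character country code.
//
//    updates := []*waf.GeoMatchSetUpdate{{
//        Action: aws.String("INSERT"),
//        GeoMatchConstraint: &waf.GeoMatchConstraint{
//            Type:  aws.String("Country"),
//            Value: aws.String("CA"), // example country code
//        },
//    }}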
+// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateGeoMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. 
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException"
+// The operation failed because the referenced object doesn't exist.
+//
+// * ErrCodeWAFReferencedItemException "WAFReferencedItemException"
+// The operation failed because you tried to delete an object that is still
+// in use. For example:
+//
+// * You tried to delete a ByteMatchSet that is still referenced by a Rule.
+//
+// * You tried to delete a Rule that is still referenced by a WebACL.
+//
+// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateGeoMatchSet
+func (c *WAFRegional) UpdateGeoMatchSet(input *waf.UpdateGeoMatchSetInput) (*waf.UpdateGeoMatchSetOutput, error) {
+	req, out := c.UpdateGeoMatchSetRequest(input)
+	return out, req.Send()
+}
+
+// UpdateGeoMatchSetWithContext is the same as UpdateGeoMatchSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateGeoMatchSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateGeoMatchSetWithContext(ctx aws.Context, input *waf.UpdateGeoMatchSetInput, opts ...request.Option) (*waf.UpdateGeoMatchSetOutput, error) {
+	req, out := c.UpdateGeoMatchSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateIPSet = "UpdateIPSet"
+
+// UpdateIPSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateIPSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateIPSet for more information on using the UpdateIPSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateIPSetRequest method.
+// req, resp := client.UpdateIPSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateIPSet
+func (c *WAFRegional) UpdateIPSetRequest(input *waf.UpdateIPSetInput) (req *request.Request, output *waf.UpdateIPSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateIPSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateIPSetInput{}
+	}
+
+	output = &waf.UpdateIPSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateIPSet API operation for AWS WAF Regional.
+// +// Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor +// object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change an IPSetDescriptor object, you delete the existing object and add +// a new one. +// +// * The IP address version, IPv4 or IPv6. +// +// * The IP address in CIDR notation, for example, 192.0.2.0/24 (for the +// range of IP addresses from 192.0.2.0 to 192.0.2.255) or 192.0.2.44/32 +// (for the individual IP address 192.0.2.44). +// +// AWS WAF supports /8, /16, /24, and /32 IP address ranges for IPv4, and /24, +// /32, /48, /56, /64 and /128 for IPv6. For more information about CIDR notation, +// see the Wikipedia entry Classless Inter-Domain Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). +// +// IPv6 addresses can be represented using any of the following formats: +// +// * 1111:0000:0000:0000:0000:0000:0000:0111/128 +// +// * 1111:0:0:0:0:0:0:0111/128 +// +// * 1111::0111/128 +// +// * 1111::111/128 +// +// You use an IPSet to specify which web requests you want to allow or block +// based on the IP addresses that the requests originated from. For example, +// if you're receiving a lot of requests from one or a small number of IP addresses +// and you want to block the requests, you can create an IPSet that specifies +// those IP addresses, and then configure AWS WAF to block the requests. +// +// To create and configure an IPSet, perform the following steps: +// +// Submit a CreateIPSet request. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateIPSet request. +// +// Submit an UpdateIPSet request to specify the IP addresses that you want AWS +// WAF to watch for. +// +// When you update an IPSet, you specify the IP addresses that you want to add +// and/or the IP addresses that you want to delete. If you want to change an +// IP address, you delete the existing IP address and add the new one. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateIPSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. 
+// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateIPSet +func (c *WAFRegional) UpdateIPSet(input *waf.UpdateIPSetInput) (*waf.UpdateIPSetOutput, error) { + req, out := c.UpdateIPSetRequest(input) + return out, req.Send() +} + +// UpdateIPSetWithContext is the same as UpdateIPSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateIPSet for details on how to use this API operation. 
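+//
+// A hedged sketch of calling this operation with a deadline (an editor's
+// illustration, not generated SDK text; svc, the set ID, and the CIDR are
+// assumed placeholder values, and the timeout is arbitrary):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	token, err := svc.GetChangeTokenWithContext(ctx, &waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.UpdateIPSetWithContext(ctx, &waf.UpdateIPSetInput{
+//		ChangeToken: token.ChangeToken,
+//		IPSetId:     aws.String("example-ipset-id"), // assumed ID
+//		Updates: []*waf.IPSetUpdate{{
+//			Action: aws.String("INSERT"),
+//			IPSetDescriptor: &waf.IPSetDescriptor{
+//				Type:  aws.String("IPV4"),
+//				Value: aws.String("192.0.2.0/24"),
+//			},
+//		}},
+//	})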
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateIPSetWithContext(ctx aws.Context, input *waf.UpdateIPSetInput, opts ...request.Option) (*waf.UpdateIPSetOutput, error) {
+	req, out := c.UpdateIPSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateRateBasedRule = "UpdateRateBasedRule"
+
+// UpdateRateBasedRuleRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRateBasedRule operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRateBasedRule for more information on using the UpdateRateBasedRule
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateRateBasedRuleRequest method.
+// req, resp := client.UpdateRateBasedRuleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRateBasedRule
+func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleInput) (req *request.Request, output *waf.UpdateRateBasedRuleOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRateBasedRule,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateRateBasedRuleInput{}
+	}
+
+	output = &waf.UpdateRateBasedRuleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRateBasedRule API operation for AWS WAF Regional.
+//
+// Inserts or deletes Predicate objects in a rule and updates the RateLimit
+// in the rule.
+//
+// Each Predicate object identifies a predicate, such as a ByteMatchSet or an
+// IPSet, that specifies the web requests that you want to block or count. The
+// RateLimit specifies the number of requests every five minutes that triggers
+// the rule.
+//
+// If you add more than one predicate to a RateBasedRule, a request must match
+// all the predicates and exceed the RateLimit to be counted or blocked. For
+// example, suppose you add the following to a RateBasedRule:
+//
+// * An IPSet that matches the IP address 192.0.2.44/32
+//
+// * A ByteMatchSet that matches BadBot in the User-Agent header
+//
+// Further, you specify a RateLimit of 15,000.
+//
+// You then add the RateBasedRule to a WebACL and specify that you want to block
+// requests that satisfy the rule. For a request to be blocked, it must come
+// from the IP address 192.0.2.44 and the User-Agent header in the request must
+// contain the value BadBot. Further, requests that match these two conditions
+// must be received at a rate of more than 15,000 every five minutes. If the
+// rate drops below this limit, AWS WAF no longer blocks the requests.
+//
+// As a second example, suppose you want to limit requests to a particular page
+// on your site.
To do this, you could add the following to a RateBasedRule: +// +// * A ByteMatchSet with FieldToMatch of URI +// +// * A PositionalConstraint of STARTS_WITH +// +// * A TargetString of login +// +// Further, you specify a RateLimit of 15,000. +// +// By adding this RateBasedRule to a WebACL, you could limit requests to your +// login page without affecting the rest of your site. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateRateBasedRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. 
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException"
+// The operation failed because the referenced object doesn't exist.
+//
+// * ErrCodeWAFReferencedItemException "WAFReferencedItemException"
+// The operation failed because you tried to delete an object that is still
+// in use. For example:
+//
+// * You tried to delete a ByteMatchSet that is still referenced by a Rule.
+//
+// * You tried to delete a Rule that is still referenced by a WebACL.
+//
+// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRateBasedRule
+func (c *WAFRegional) UpdateRateBasedRule(input *waf.UpdateRateBasedRuleInput) (*waf.UpdateRateBasedRuleOutput, error) {
+	req, out := c.UpdateRateBasedRuleRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRateBasedRuleWithContext is the same as UpdateRateBasedRule with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRateBasedRule for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateRateBasedRuleWithContext(ctx aws.Context, input *waf.UpdateRateBasedRuleInput, opts ...request.Option) (*waf.UpdateRateBasedRuleOutput, error) {
+	req, out := c.UpdateRateBasedRuleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
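+
+// Editor's note: a minimal, hedged sketch of the UpdateRateBasedRule workflow
+// described above (illustrative only; svc, the rule ID, and the predicate's
+// IPSet ID are assumed placeholder values):
+//
+//	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
+//		ChangeToken: token.ChangeToken,
+//		RuleId:      aws.String("example-rate-based-rule-id"), // assumed ID
+//		RateLimit:   aws.Int64(15000),
+//		Updates: []*waf.RuleUpdate{{
+//			Action: aws.String("INSERT"),
+//			Predicate: &waf.Predicate{
+//				Negated: aws.Bool(false),
+//				Type:    aws.String("IPMatch"),
+//				DataId:  aws.String("example-ipset-id"), // assumed ID
+//			},
+//		}},
+//	})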
+
+const opUpdateRegexMatchSet = "UpdateRegexMatchSet"
+
+// UpdateRegexMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateRegexMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateRegexMatchSet for more information on using the UpdateRegexMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateRegexMatchSetRequest method.
+// req, resp := client.UpdateRegexMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRegexMatchSet
+func (c *WAFRegional) UpdateRegexMatchSetRequest(input *waf.UpdateRegexMatchSetInput) (req *request.Request, output *waf.UpdateRegexMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateRegexMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateRegexMatchSetInput{}
+	}
+
+	output = &waf.UpdateRegexMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateRegexMatchSet API operation for AWS WAF Regional.
+//
+// Inserts or deletes RegexMatchSetUpdate objects (filters) in a RegexMatchSet.
+// For each RegexMatchSetUpdate object, you specify the following values:
+//
+// * Whether to insert or delete the object from the array. If you want to
+// change a RegexMatchSetUpdate object, you delete the existing object and
+// add a new one.
+//
+// * The part of a web request that you want AWS WAF to inspect, such as
+// a query string or the value of the User-Agent header.
+//
+// * The identifier of the pattern (a regular expression) that you want AWS
+// WAF to look for. For more information, see RegexPatternSet.
+//
+// * Whether to perform any conversions on the request, such as converting
+// it to lowercase, before inspecting it for the specified string.
+//
+// For example, you can create a RegexPatternSet that matches any requests with
+// User-Agent headers that contain the string B[a@]dB[o0]t. You can then configure
+// AWS WAF to reject those requests.
+//
+// To create and configure a RegexMatchSet, perform the following steps:
+//
+// Create a RegexMatchSet. For more information, see CreateRegexMatchSet.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateRegexMatchSet request.
+//
+// Submit an UpdateRegexMatchSet request to specify the part of the request
+// that you want AWS WAF to inspect (for example, the header or the URI) and
+// the identifier of the RegexPatternSet that contains the regular expression
+// patterns you want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateRegexMatchSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeWAFStaleDataException "WAFStaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeWAFDisallowedNameException "WAFDisallowedNameException"
+// The name specified is invalid.
+//
+// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException"
+// The operation failed because the referenced object doesn't exist.
+//
+// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException"
+// The operation failed because you tried to add an object to or delete an object
+// from another object that doesn't exist. For example:
+//
+// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't
+// exist.
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL.
+//
+// * You tried to add a Rule to a WebACL, but the Rule already exists in
+// the specified WebACL.
+//
+// * You tried to add an IP address to an IPSet, but the IP address already
+// exists in the specified IPSet.
+//
+// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple
+// already exists in the specified WebACL.
+//
+// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRegexMatchSet
+func (c *WAFRegional) UpdateRegexMatchSet(input *waf.UpdateRegexMatchSetInput) (*waf.UpdateRegexMatchSetOutput, error) {
+	req, out := c.UpdateRegexMatchSetRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRegexMatchSetWithContext is the same as UpdateRegexMatchSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRegexMatchSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateRegexMatchSetWithContext(ctx aws.Context, input *waf.UpdateRegexMatchSetInput, opts ...request.Option) (*waf.UpdateRegexMatchSetOutput, error) {
+	req, out := c.UpdateRegexMatchSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
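+
+// Editor's note: a hedged sketch of UpdateRegexMatchSet tying together the
+// FieldToMatch, text transformation, and RegexPatternSet identifier discussed
+// above (illustrative only; svc and both set IDs are assumed placeholders):
+//
+//	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.UpdateRegexMatchSet(&waf.UpdateRegexMatchSetInput{
+//		ChangeToken:     token.ChangeToken,
+//		RegexMatchSetId: aws.String("example-regex-match-set-id"), // assumed ID
+//		Updates: []*waf.RegexMatchSetUpdate{{
+//			Action: aws.String("INSERT"),
+//			RegexMatchTuple: &waf.RegexMatchTuple{
+//				FieldToMatch: &waf.FieldToMatch{
+//					Type: aws.String("HEADER"),
+//					Data: aws.String("User-Agent"),
+//				},
+//				RegexPatternSetId:  aws.String("example-regex-pattern-set-id"), // assumed ID
+//				TextTransformation: aws.String("LOWERCASE"),
+//			},
+//		}},
+//	})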
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRegexPatternSet for more information on using the UpdateRegexPatternSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRegexPatternSetRequest method. +// req, resp := client.UpdateRegexPatternSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRegexPatternSet +func (c *WAFRegional) UpdateRegexPatternSetRequest(input *waf.UpdateRegexPatternSetInput) (req *request.Request, output *waf.UpdateRegexPatternSetOutput) { + op := &request.Operation{ + Name: opUpdateRegexPatternSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateRegexPatternSetInput{} + } + + output = &waf.UpdateRegexPatternSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRegexPatternSet API operation for AWS WAF Regional. +// +// Inserts or deletes RegexMatchSetUpdate objects (filters) in a RegexPatternSet. +// For each RegexPatternSet object, you specify the following values: +// +// * Whether to insert or delete the object from the array. If you want to +// change a RegexPatternSet object, you delete the existing object and add +// a new one. +// +// * The regular expression pattern that you want AWS WAF to look for. For +// more information, see RegexPatternSet. +// +// For example, you can create a RegexPatternString such as B[a@]dB[o0]t. AWS +// WAF will match this RegexPatternString to: +// +// * BadBot +// +// * BadB0t +// +// * B@dBot +// +// * B@dB0t +// +// To create and configure a RegexPatternSet, perform the following steps: +// +// Create a RegexPatternSet. For more information, see CreateRegexPatternSet. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateRegexPatternSet request. +// +// Submit an UpdateRegexPatternSet request to specify the regular expression +// pattern that you want AWS WAF to watch for. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateRegexPatternSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. 
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException"
+// The operation failed because you tried to add an object to or delete an object
+// from another object that doesn't exist. For example:
+//
+// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't
+// exist.
+//
+// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule
+// that doesn't exist.
+//
+// * You tried to add an IP address to or delete an IP address from an IPSet
+// that doesn't exist.
+//
+// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from
+// a ByteMatchSet that doesn't exist.
+//
+// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL.
+//
+// * You tried to add a Rule to a WebACL, but the Rule already exists in
+// the specified WebACL.
+//
+// * You tried to add an IP address to an IPSet, but the IP address already
+// exists in the specified IPSet.
+//
+// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple
+// already exists in the specified WebACL.
+//
+// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeWAFInvalidRegexPatternException "WAFInvalidRegexPatternException"
+// The regular expression (regex) you specified in RegexPatternString is invalid.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRegexPatternSet
+func (c *WAFRegional) UpdateRegexPatternSet(input *waf.UpdateRegexPatternSetInput) (*waf.UpdateRegexPatternSetOutput, error) {
+	req, out := c.UpdateRegexPatternSetRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRegexPatternSetWithContext is the same as UpdateRegexPatternSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRegexPatternSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateRegexPatternSetWithContext(ctx aws.Context, input *waf.UpdateRegexPatternSetInput, opts ...request.Option) (*waf.UpdateRegexPatternSetOutput, error) {
+	req, out := c.UpdateRegexPatternSetRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
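+
+// Editor's note: a hedged sketch of UpdateRegexPatternSet inserting the
+// B[a@]dB[o0]t pattern from the description above (illustrative only; svc and
+// the set ID are assumed placeholders):
+//
+//	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.UpdateRegexPatternSet(&waf.UpdateRegexPatternSetInput{
+//		ChangeToken:       token.ChangeToken,
+//		RegexPatternSetId: aws.String("example-regex-pattern-set-id"), // assumed ID
+//		Updates: []*waf.RegexPatternSetUpdate{{
+//			Action:             aws.String("INSERT"),
+//			RegexPatternString: aws.String("B[a@]dB[o0]t"),
+//		}},
+//	})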
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRule for more information on using the UpdateRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRuleRequest method. +// req, resp := client.UpdateRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRule +func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *request.Request, output *waf.UpdateRuleOutput) { + op := &request.Operation{ + Name: opUpdateRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.UpdateRuleInput{} + } + + output = &waf.UpdateRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRule API operation for AWS WAF Regional. +// +// Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies +// a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests +// that you want to allow, block, or count. If you add more than one predicate +// to a Rule, a request must match all of the specifications to be allowed, +// blocked, or counted. For example, suppose you add the following to a Rule: +// +// * A ByteMatchSet that matches the value BadBot in the User-Agent header +// +// * An IPSet that matches the IP address 192.0.2.44 +// +// You then add the Rule to a WebACL and specify that you want to block requests +// that satisfy the Rule. For a request to be blocked, the User-Agent header +// in the request must contain the value BadBotand the request must originate +// from the IP address 192.0.2.44. +// +// To create and configure a Rule, perform the following steps: +// +// Create and update the predicates that you want to include in the Rule. +// +// Create the Rule. See CreateRule. +// +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateRule request. +// +// Submit an UpdateRule request to add predicates to the Rule. +// +// Create and update a WebACL that contains the Rule. See CreateWebACL. +// +// If you want to replace one ByteMatchSet or IPSet with another, you delete +// the existing one and add the new one. +// +// For more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. +// +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. 
+// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. 
For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateRule
+func (c *WAFRegional) UpdateRule(input *waf.UpdateRuleInput) (*waf.UpdateRuleOutput, error) {
+	req, out := c.UpdateRuleRequest(input)
+	return out, req.Send()
+}
+
+// UpdateRuleWithContext is the same as UpdateRule with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateRule for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateRuleWithContext(ctx aws.Context, input *waf.UpdateRuleInput, opts ...request.Option) (*waf.UpdateRuleOutput, error) {
+	req, out := c.UpdateRuleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
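+
+// Editor's note: a hedged sketch of swapping one predicate for another in a
+// Rule, per the delete-then-add guidance above (illustrative only; svc, the
+// rule ID, and both DataId values are assumed placeholders):
+//
+//	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//	if err != nil {
+//		return err
+//	}
+//	_, err = svc.UpdateRule(&waf.UpdateRuleInput{
+//		ChangeToken: token.ChangeToken,
+//		RuleId:      aws.String("example-rule-id"), // assumed ID
+//		Updates: []*waf.RuleUpdate{
+//			{
+//				Action: aws.String("DELETE"),
+//				Predicate: &waf.Predicate{
+//					Negated: aws.Bool(false),
+//					Type:    aws.String("IPMatch"),
+//					DataId:  aws.String("old-ipset-id"), // assumed ID
+//				},
+//			},
+//			{
+//				Action: aws.String("INSERT"),
+//				Predicate: &waf.Predicate{
+//					Negated: aws.Bool(false),
+//					Type:    aws.String("ByteMatch"),
+//					DataId:  aws.String("new-byte-match-set-id"), // assumed ID
+//				},
+//			},
+//		},
+//	})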
+
+const opUpdateSizeConstraintSet = "UpdateSizeConstraintSet"
+
+// UpdateSizeConstraintSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSizeConstraintSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateSizeConstraintSet for more information on using the UpdateSizeConstraintSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateSizeConstraintSetRequest method.
+// req, resp := client.UpdateSizeConstraintSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSizeConstraintSet
+func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstraintSetInput) (req *request.Request, output *waf.UpdateSizeConstraintSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateSizeConstraintSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateSizeConstraintSetInput{}
+	}
+
+	output = &waf.UpdateSizeConstraintSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateSizeConstraintSet API operation for AWS WAF Regional.
+//
+// Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet.
+// For each SizeConstraint object, you specify the following values:
+//
+// * Whether to insert or delete the object from the array. If you want to
+// change a SizeConstraintSetUpdate object, you delete the existing object
+// and add a new one.
+//
+// * The part of a web request that you want AWS WAF to evaluate, such as
+// the length of a query string or the length of the User-Agent header.
+//
+// * Whether to perform any transformations on the request, such as converting
+// it to lowercase, before checking its length. Note that transformations
+// of the request body are not supported because the AWS resource forwards
+// only the first 8192 bytes of your request to AWS WAF.
+//
+// * A ComparisonOperator used for evaluating the selected part of the request
+// against the specified Size, such as equals, greater than, less than, and
+// so on.
+//
+// * The length, in bytes, that you want AWS WAF to watch for in the selected
+// part of the request. The length is computed after applying the transformation.
+//
+// For example, you can add a SizeConstraintSetUpdate object that matches web
+// requests in which the length of the User-Agent header is greater than 100
+// bytes. You can then configure AWS WAF to block those requests.
+//
+// To create and configure a SizeConstraintSet, perform the following steps:
+//
+// Create a SizeConstraintSet. For more information, see CreateSizeConstraintSet.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateSizeConstraintSet request.
+//
+// Submit an UpdateSizeConstraintSet request to specify the part of the request
+// that you want AWS WAF to inspect (for example, the header or the URI) and
+// the value that you want AWS WAF to watch for.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateSizeConstraintSet for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeWAFStaleDataException "WAFStaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL.
+//
+// * You tried to add a Rule to a WebACL, but the Rule already exists in
+// the specified WebACL.
+//
+// * You tried to add an IP address to an IPSet, but the IP address already
+// exists in the specified IPSet.
+//
+// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple
+// already exists in the specified WebACL.
+//
+// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException"
+// The operation failed because AWS WAF didn't recognize a parameter in the
+// request. For example:
+//
+// * You specified an invalid parameter name.
+//
+// * You specified an invalid value.
+// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSizeConstraintSet +func (c *WAFRegional) UpdateSizeConstraintSet(input *waf.UpdateSizeConstraintSetInput) (*waf.UpdateSizeConstraintSetOutput, error) { + req, out := c.UpdateSizeConstraintSetRequest(input) + return out, req.Send() +} + +// UpdateSizeConstraintSetWithContext is the same as UpdateSizeConstraintSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSizeConstraintSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) UpdateSizeConstraintSetWithContext(ctx aws.Context, input *waf.UpdateSizeConstraintSetInput, opts ...request.Option) (*waf.UpdateSizeConstraintSetOutput, error) { + req, out := c.UpdateSizeConstraintSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
+const opUpdateSqlInjectionMatchSet = "UpdateSqlInjectionMatchSet"
+
+// UpdateSqlInjectionMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSqlInjectionMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateSqlInjectionMatchSet for more information on using the UpdateSqlInjectionMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateSqlInjectionMatchSetRequest method.
+// req, resp := client.UpdateSqlInjectionMatchSetRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSqlInjectionMatchSet
+func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInjectionMatchSetInput) (req *request.Request, output *waf.UpdateSqlInjectionMatchSetOutput) {
+	op := &request.Operation{
+		Name:       opUpdateSqlInjectionMatchSet,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &waf.UpdateSqlInjectionMatchSetInput{}
+	}
+
+	output = &waf.UpdateSqlInjectionMatchSetOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateSqlInjectionMatchSet API operation for AWS WAF Regional.
+//
+// Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet.
+// For each SqlInjectionMatchTuple object, you specify the following values:
+//
+// * Action: Whether to insert the object into or delete the object from
+// the array. To change a SqlInjectionMatchTuple, you delete the existing
+// object and add a new one.
+//
+// * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+// and, if you want AWS WAF to inspect a header, the name of the header.
+//
+// * TextTransformation: Which text transformation, if any, to perform on
+// the web request before inspecting the request for snippets of malicious
+// SQL code.
+//
+// You use SqlInjectionMatchSet objects to specify which CloudFront requests
+// you want to allow, block, or count. For example, if you're receiving requests
+// that contain snippets of SQL code in the query string and you want to block
+// the requests, you can create a SqlInjectionMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure a SqlInjectionMatchSet, perform the following steps:
+//
+// Submit a CreateSqlInjectionMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateSqlInjectionMatchSet request.
+//
+// Submit an UpdateSqlInjectionMatchSet request to specify the parts of web
+// requests that you want AWS WAF to inspect for snippets of SQL code.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateSqlInjectionMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFStaleDataException "WAFStaleDataException" +// The operation failed because you tried to create, update, or delete an object +// by using a change token that has already been used. 
+//
+// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateSqlInjectionMatchSet
+func (c *WAFRegional) UpdateSqlInjectionMatchSet(input *waf.UpdateSqlInjectionMatchSetInput) (*waf.UpdateSqlInjectionMatchSetOutput, error) {
+ req, out := c.UpdateSqlInjectionMatchSetRequest(input)
+ return out, req.Send()
+}
+
+// UpdateSqlInjectionMatchSetWithContext is the same as UpdateSqlInjectionMatchSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateSqlInjectionMatchSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateSqlInjectionMatchSetWithContext(ctx aws.Context, input *waf.UpdateSqlInjectionMatchSetInput, opts ...request.Option) (*waf.UpdateSqlInjectionMatchSetOutput, error) {
+ req, out := c.UpdateSqlInjectionMatchSetRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
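+
+// NOTE (editor's illustrative sketch, not generated SDK code): the snippet
+// below shows the change-token workflow described above, end to end. The
+// session setup, region, set ID, and field values are assumptions for
+// illustration only; the same pattern applies to UpdateWebACL and
+// UpdateXssMatchSet further down in this file.
+//
+//    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
+//    svc := wafregional.New(sess)
+//
+//    token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
+//    if err == nil {
+//        _, err = svc.UpdateSqlInjectionMatchSet(&waf.UpdateSqlInjectionMatchSetInput{
+//            ChangeToken:            token.ChangeToken,
+//            SqlInjectionMatchSetId: aws.String("hypothetical-set-id"),
+//            Updates: []*waf.SqlInjectionMatchSetUpdate{{
+//                Action: aws.String(wafregional.ChangeActionInsert),
+//                SqlInjectionMatchTuple: &waf.SqlInjectionMatchTuple{
+//                    FieldToMatch:       &waf.FieldToMatch{Type: aws.String("QUERY_STRING")},
+//                    TextTransformation: aws.String("URL_DECODE"),
+//                },
+//            }},
+//        })
+//    }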
+
+const opUpdateWebACL = "UpdateWebACL"
+
+// UpdateWebACLRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateWebACL operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateWebACL for more information on using the UpdateWebACL
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the UpdateWebACLRequest method.
+//    req, resp := client.UpdateWebACLRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateWebACL
+func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *request.Request, output *waf.UpdateWebACLOutput) {
+ op := &request.Operation{
+ Name: opUpdateWebACL,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &waf.UpdateWebACLInput{}
+ }
+
+ output = &waf.UpdateWebACLOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateWebACL API operation for AWS WAF Regional.
+//
+// Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies
+// web requests that you want to allow, block, or count. When you update a WebACL,
+// you specify the following values:
+//
+// * A default action for the WebACL, either ALLOW or BLOCK. AWS WAF performs
+// the default action if a request doesn't match the criteria in any of the
+// Rules in a WebACL.
+//
+// * The Rules that you want to add and/or delete. If you want to replace
+// one Rule with another, you delete the existing Rule and add the new one.
+//
+// * For each Rule, whether you want AWS WAF to allow requests, block requests,
+// or count requests that match the conditions in the Rule.
+//
+// * The order in which you want AWS WAF to evaluate the Rules in a WebACL.
+// If you add more than one Rule to a WebACL, AWS WAF evaluates each request
+// against the Rules in order based on the value of Priority. (The Rule that
+// has the lowest value for Priority is evaluated first.) When a web request
+// matches all of the predicates (such as ByteMatchSets and IPSets) in a
+// Rule, AWS WAF immediately takes the corresponding action, allow or block,
+// and doesn't evaluate the request against the remaining Rules in the WebACL,
+// if any.
+//
+// To create and configure a WebACL, perform the following steps:
+//
+// Create and update the predicates that you want to include in Rules. For more
+// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet,
+// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet.
+//
+// Create and update the Rules that you want to include in the WebACL. For more
+// information, see CreateRule and UpdateRule.
+//
+// Create a WebACL. See CreateWebACL.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateWebACL request.
+//
+// Submit an UpdateWebACL request to specify the Rules that you want to include
+// in the WebACL, to specify the default action, and to associate the WebACL
+// with a CloudFront distribution.
+//
+// Be aware that if you try to add a RATE_BASED rule to a web ACL without setting
+// the rule type when first creating the rule, the UpdateWebACL request will
+// fail because the request tries to add a REGULAR rule (the default rule type)
+// with the specified ID, which does not exist.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS WAF Regional's
+// API operation UpdateWebACL for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeWAFStaleDataException "WAFStaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeWAFInternalErrorException "WAFInternalErrorException"
+// The operation failed because of a system problem, even though the request
+// was valid. Retry your request.
+//
+// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException"
+// The operation failed because you tried to create, update, or delete an object
+// by using an invalid account identifier.
+//
+// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException"
+// The operation failed because there was nothing to do. For example:
+//
+// * You tried to remove a Rule from a WebACL, but the Rule isn't in the
+// specified WebACL.
+//
+// * You tried to remove an IP address from an IPSet, but the IP address
+// isn't in the specified IPSet.
+//
+// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple
+// isn't in the specified WebACL. 
+// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. +// +// * ErrCodeWAFReferencedItemException "WAFReferencedItemException" +// The operation failed because you tried to delete an object that is still +// in use. For example: +// +// * You tried to delete a ByteMatchSet that is still referenced by a Rule. +// +// * You tried to delete a Rule that is still referenced by a WebACL. +// +// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException" +// The operation exceeds a resource limit, for example, the maximum number of +// WebACL objects that you can create for an AWS account. For more information, +// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateWebACL +func (c *WAFRegional) UpdateWebACL(input *waf.UpdateWebACLInput) (*waf.UpdateWebACLOutput, error) { + req, out := c.UpdateWebACLRequest(input) + return out, req.Send() +} + +// UpdateWebACLWithContext is the same as UpdateWebACL with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateWebACL for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateWebACLWithContext(ctx aws.Context, input *waf.UpdateWebACLInput, opts ...request.Option) (*waf.UpdateWebACLOutput, error) {
+ req, out := c.UpdateWebACLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateXssMatchSet = "UpdateXssMatchSet"
+
+// UpdateXssMatchSetRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateXssMatchSet operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateXssMatchSet for more information on using the UpdateXssMatchSet
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the UpdateXssMatchSetRequest method.
+//    req, resp := client.UpdateXssMatchSetRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateXssMatchSet
+func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput) (req *request.Request, output *waf.UpdateXssMatchSetOutput) {
+ op := &request.Operation{
+ Name: opUpdateXssMatchSet,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &waf.UpdateXssMatchSetInput{}
+ }
+
+ output = &waf.UpdateXssMatchSetOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateXssMatchSet API operation for AWS WAF Regional.
+//
+// Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For
+// each XssMatchTuple object, you specify the following values:
+//
+// * Action: Whether to insert the object into or delete the object from
+// the array. To change an XssMatchTuple, you delete the existing object and
+// add a new one.
+//
+// * FieldToMatch: The part of web requests that you want AWS WAF to inspect
+// and, if you want AWS WAF to inspect a header, the name of the header.
+//
+// * TextTransformation: Which text transformation, if any, to perform on
+// the web request before inspecting the request for cross-site scripting
+// attacks.
+//
+// You use XssMatchSet objects to specify which CloudFront requests you want
+// to allow, block, or count. For example, if you're receiving requests that
+// contain cross-site scripting attacks in the request body and you want to
+// block the requests, you can create an XssMatchSet with the applicable settings,
+// and then configure AWS WAF to block the requests.
+//
+// To create and configure an XssMatchSet, perform the following steps:
+//
+// Submit a CreateXssMatchSet request.
+//
+// Use GetChangeToken to get the change token that you provide in the ChangeToken
+// parameter of an UpdateXssMatchSet request.
+//
+// Submit an UpdateXssMatchSet request to specify the parts of web requests
+// that you want AWS WAF to inspect for cross-site scripting attacks.
+//
+// For more information about how to use the AWS WAF API to allow or block HTTP
+// requests, see the AWS WAF Developer Guide (http://docs.aws.amazon.com/waf/latest/developerguide/). 
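+//
+// As an editor's illustrative sketch (not generated text), a single
+// XssMatchSetUpdate for the workflow above might be built like this; the
+// field type and text transformation shown are assumptions:
+//
+//    update := &waf.XssMatchSetUpdate{
+//        Action: aws.String(wafregional.ChangeActionInsert),
+//        XssMatchTuple: &waf.XssMatchTuple{
+//            FieldToMatch:       &waf.FieldToMatch{Type: aws.String("BODY")},
+//            TextTransformation: aws.String("HTML_ENTITY_DECODE"),
+//        },
+//    }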
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation UpdateXssMatchSet for usage and error information. +// +// Returned Error Codes: +// * ErrCodeWAFInternalErrorException "WAFInternalErrorException" +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * ErrCodeWAFInvalidAccountException "WAFInvalidAccountException" +// The operation failed because you tried to create, update, or delete an object +// by using an invalid account identifier. +// +// * ErrCodeWAFInvalidOperationException "WAFInvalidOperationException" +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add an IP address to an IPSet, but the IP address already +// exists in the specified IPSet. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * ErrCodeWAFInvalidParameterException "WAFInvalidParameterException" +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatchType other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * ErrCodeWAFNonexistentContainerException "WAFNonexistentContainerException" +// The operation failed because you tried to add an object to or delete an object +// from another object that doesn't exist. For example: +// +// * You tried to add a Rule to or delete a Rule from a WebACL that doesn't +// exist. +// +// * You tried to add a ByteMatchSet to or delete a ByteMatchSet from a Rule +// that doesn't exist. +// +// * You tried to add an IP address to or delete an IP address from an IPSet +// that doesn't exist. +// +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from +// a ByteMatchSet that doesn't exist. +// +// * ErrCodeWAFNonexistentItemException "WAFNonexistentItemException" +// The operation failed because the referenced object doesn't exist. 
+//
+// * ErrCodeWAFStaleDataException "WAFStaleDataException"
+// The operation failed because you tried to create, update, or delete an object
+// by using a change token that has already been used.
+//
+// * ErrCodeWAFLimitsExceededException "WAFLimitsExceededException"
+// The operation exceeds a resource limit, for example, the maximum number of
+// WebACL objects that you can create for an AWS account. For more information,
+// see Limits (http://docs.aws.amazon.com/waf/latest/developerguide/limits.html)
+// in the AWS WAF Developer Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/UpdateXssMatchSet
+func (c *WAFRegional) UpdateXssMatchSet(input *waf.UpdateXssMatchSetInput) (*waf.UpdateXssMatchSetOutput, error) {
+ req, out := c.UpdateXssMatchSetRequest(input)
+ return out, req.Send()
+}
+
+// UpdateXssMatchSetWithContext is the same as UpdateXssMatchSet with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateXssMatchSet for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WAFRegional) UpdateXssMatchSetWithContext(ctx aws.Context, input *waf.UpdateXssMatchSetInput, opts ...request.Option) (*waf.UpdateXssMatchSetOutput, error) {
+ req, out := c.UpdateXssMatchSetRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
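+
+// NOTE (editor's illustrative sketch, not generated SDK code): the WithContext
+// variants above accept any context.Context. A call with a deadline, assuming
+// an already-constructed svc client and input value, could look like:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//
+//    out, err := svc.UpdateXssMatchSetWithContext(ctx, input)
+//    if err != nil {
+//        // err may be a context cancellation as well as an awserr.Error
+//        fmt.Println(err)
+//    } else {
+//        fmt.Println(out)
+//    }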
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACLRequest
+type AssociateWebACLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN (Amazon Resource Name) of the resource to be protected.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+
+ // A unique identifier (ID) for the web ACL.
+ //
+ // WebACLId is a required field
+ WebACLId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssociateWebACLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateWebACLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssociateWebACLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssociateWebACLInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+ }
+ if s.WebACLId == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebACLId"))
+ }
+ if s.WebACLId != nil && len(*s.WebACLId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *AssociateWebACLInput) SetResourceArn(v string) *AssociateWebACLInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetWebACLId sets the WebACLId field's value.
+func (s *AssociateWebACLInput) SetWebACLId(v string) *AssociateWebACLInput {
+ s.WebACLId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACLResponse
+type AssociateWebACLOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssociateWebACLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssociateWebACLOutput) GoString() string {
+ return s.String()
+}
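+
+// NOTE (editor's illustrative sketch, not generated SDK code): associating a
+// regional web ACL with a resource such as an Application Load Balancer. The
+// ARN and web ACL ID below are placeholders:
+//
+//    _, err := svc.AssociateWebACL(&wafregional.AssociateWebACLInput{
+//        ResourceArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/example/50dc6c495c0c9188"),
+//        WebACLId:    aws.String("hypothetical-web-acl-id"),
+//    })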
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACLRequest
+type DisassociateWebACLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN (Amazon Resource Name) of the resource from which the web ACL is
+ // being removed.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DisassociateWebACLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateWebACLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DisassociateWebACLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DisassociateWebACLInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *DisassociateWebACLInput) SetResourceArn(v string) *DisassociateWebACLInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACLResponse
+type DisassociateWebACLOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DisassociateWebACLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DisassociateWebACLOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResourceRequest
+type GetWebACLForResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN (Amazon Resource Name) of the resource for which to get the web ACL.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetWebACLForResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetWebACLForResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetWebACLForResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetWebACLForResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *GetWebACLForResourceInput) SetResourceArn(v string) *GetWebACLForResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResourceResponse
+type GetWebACLForResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the web ACL that you specified in the GetWebACLForResource
+ // request. If there is no associated resource, a null WebACLSummary is returned.
+ WebACLSummary *waf.WebACLSummary `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetWebACLForResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetWebACLForResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SetWebACLSummary sets the WebACLSummary field's value.
+func (s *GetWebACLForResourceOutput) SetWebACLSummary(v *waf.WebACLSummary) *GetWebACLForResourceOutput {
+ s.WebACLSummary = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACLRequest
+type ListResourcesForWebACLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier (ID) of the web ACL for which to list the associated
+ // resources.
+ //
+ // WebACLId is a required field
+ WebACLId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListResourcesForWebACLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListResourcesForWebACLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListResourcesForWebACLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListResourcesForWebACLInput"}
+ if s.WebACLId == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebACLId"))
+ }
+ if s.WebACLId != nil && len(*s.WebACLId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetWebACLId sets the WebACLId field's value.
+func (s *ListResourcesForWebACLInput) SetWebACLId(v string) *ListResourcesForWebACLInput {
+ s.WebACLId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACLResponse
+type ListResourcesForWebACLOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of ARNs (Amazon Resource Names) of the resources associated with
+ // the specified web ACL. An array with zero elements is returned if there are
+ // no resources associated with the web ACL.
+ ResourceArns []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListResourcesForWebACLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListResourcesForWebACLOutput) GoString() string {
+ return s.String()
+}
+
+// SetResourceArns sets the ResourceArns field's value.
+func (s *ListResourcesForWebACLOutput) SetResourceArns(v []*string) *ListResourcesForWebACLOutput {
+ s.ResourceArns = v
+ return s
+}
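+
+// NOTE (editor's illustrative sketch, not generated SDK code): reading an
+// association back with the list and get operations above. The web ACL ID
+// below is a placeholder:
+//
+//    list, err := svc.ListResourcesForWebACL(&wafregional.ListResourcesForWebACLInput{
+//        WebACLId: aws.String("hypothetical-web-acl-id"),
+//    })
+//    if err == nil {
+//        for _, arn := range list.ResourceArns {
+//            got, err := svc.GetWebACLForResource(&wafregional.GetWebACLForResourceInput{
+//                ResourceArn: arn,
+//            })
+//            if err == nil && got.WebACLSummary != nil {
+//                fmt.Println(aws.StringValue(arn), aws.StringValue(got.WebACLSummary.WebACLId))
+//            }
+//        }
+//    }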
+
+const (
+ // ChangeActionInsert is a ChangeAction enum value
+ ChangeActionInsert = "INSERT"
+
+ // ChangeActionDelete is a ChangeAction enum value
+ ChangeActionDelete = "DELETE"
+)
+
+const (
+ // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value
+ ChangeTokenStatusProvisioned = "PROVISIONED"
+
+ // ChangeTokenStatusPending is a ChangeTokenStatus enum value
+ ChangeTokenStatusPending = "PENDING"
+
+ // ChangeTokenStatusInsync is a ChangeTokenStatus enum value
+ ChangeTokenStatusInsync = "INSYNC"
+)
+
+const (
+ // ComparisonOperatorEq is a ComparisonOperator enum value
+ ComparisonOperatorEq = "EQ"
+
+ // ComparisonOperatorNe is a ComparisonOperator enum value
+ ComparisonOperatorNe = "NE"
+
+ // ComparisonOperatorLe is a ComparisonOperator enum value
+ ComparisonOperatorLe = "LE"
+
+ // ComparisonOperatorLt is a ComparisonOperator enum value
+ ComparisonOperatorLt = "LT"
+
+ // ComparisonOperatorGe is a ComparisonOperator enum value
+ ComparisonOperatorGe = "GE"
+
+ // ComparisonOperatorGt is a ComparisonOperator enum value
+ ComparisonOperatorGt = "GT"
+)
+
+const (
+ // GeoMatchConstraintTypeCountry is a GeoMatchConstraintType enum value
+ GeoMatchConstraintTypeCountry = "Country"
+)
+
+const (
+ // GeoMatchConstraintValueAf is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAf = "AF"
+
+ // GeoMatchConstraintValueAx is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAx = "AX"
+
+ // GeoMatchConstraintValueAl is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAl = "AL"
+
+ // GeoMatchConstraintValueDz is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueDz = "DZ"
+
+ // GeoMatchConstraintValueAs is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAs = "AS"
+
+ // GeoMatchConstraintValueAd is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAd = "AD"
+
+ // GeoMatchConstraintValueAo is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAo = "AO"
+
+ // GeoMatchConstraintValueAi is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAi = "AI"
+
+ // GeoMatchConstraintValueAq is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAq = "AQ"
+
+ // GeoMatchConstraintValueAg is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAg = "AG"
+
+ // GeoMatchConstraintValueAr is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAr = "AR"
+
+ // GeoMatchConstraintValueAm is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAm = "AM"
+
+ // GeoMatchConstraintValueAw is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAw = "AW"
+
+ // GeoMatchConstraintValueAu is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAu = "AU"
+
+ // GeoMatchConstraintValueAt is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAt = "AT"
+
+ // GeoMatchConstraintValueAz is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueAz = "AZ"
+
+ // GeoMatchConstraintValueBs is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueBs = "BS"
+
+ // GeoMatchConstraintValueBh is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueBh = "BH"
+
+ // GeoMatchConstraintValueBd is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueBd = "BD"
+
+ // GeoMatchConstraintValueBb is a GeoMatchConstraintValue enum value
+ GeoMatchConstraintValueBb = "BB"
+
+ // 
GeoMatchConstraintValueBy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBy = "BY" + + // GeoMatchConstraintValueBe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBe = "BE" + + // GeoMatchConstraintValueBz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBz = "BZ" + + // GeoMatchConstraintValueBj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBj = "BJ" + + // GeoMatchConstraintValueBm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBm = "BM" + + // GeoMatchConstraintValueBt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBt = "BT" + + // GeoMatchConstraintValueBo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBo = "BO" + + // GeoMatchConstraintValueBq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBq = "BQ" + + // GeoMatchConstraintValueBa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBa = "BA" + + // GeoMatchConstraintValueBw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBw = "BW" + + // GeoMatchConstraintValueBv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBv = "BV" + + // GeoMatchConstraintValueBr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBr = "BR" + + // GeoMatchConstraintValueIo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIo = "IO" + + // GeoMatchConstraintValueBn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBn = "BN" + + // GeoMatchConstraintValueBg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBg = "BG" + + // GeoMatchConstraintValueBf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBf = "BF" + + // GeoMatchConstraintValueBi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBi = "BI" + + // GeoMatchConstraintValueKh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKh = "KH" + + // GeoMatchConstraintValueCm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCm = "CM" + + // GeoMatchConstraintValueCa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCa = "CA" + + // GeoMatchConstraintValueCv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCv = "CV" + + // GeoMatchConstraintValueKy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKy = "KY" + + // GeoMatchConstraintValueCf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCf = "CF" + + // GeoMatchConstraintValueTd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTd = "TD" + + // GeoMatchConstraintValueCl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCl = "CL" + + // GeoMatchConstraintValueCn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCn = "CN" + + // GeoMatchConstraintValueCx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCx = "CX" + + // GeoMatchConstraintValueCc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCc = "CC" + + // GeoMatchConstraintValueCo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCo = "CO" + + // GeoMatchConstraintValueKm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKm = "KM" + + // GeoMatchConstraintValueCg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCg = "CG" + + // GeoMatchConstraintValueCd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCd = "CD" + + // GeoMatchConstraintValueCk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCk = 
"CK" + + // GeoMatchConstraintValueCr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCr = "CR" + + // GeoMatchConstraintValueCi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCi = "CI" + + // GeoMatchConstraintValueHr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHr = "HR" + + // GeoMatchConstraintValueCu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCu = "CU" + + // GeoMatchConstraintValueCw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCw = "CW" + + // GeoMatchConstraintValueCy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCy = "CY" + + // GeoMatchConstraintValueCz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCz = "CZ" + + // GeoMatchConstraintValueDk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDk = "DK" + + // GeoMatchConstraintValueDj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDj = "DJ" + + // GeoMatchConstraintValueDm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDm = "DM" + + // GeoMatchConstraintValueDo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDo = "DO" + + // GeoMatchConstraintValueEc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEc = "EC" + + // GeoMatchConstraintValueEg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEg = "EG" + + // GeoMatchConstraintValueSv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSv = "SV" + + // GeoMatchConstraintValueGq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGq = "GQ" + + // GeoMatchConstraintValueEr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEr = "ER" + + // GeoMatchConstraintValueEe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEe = "EE" + + // GeoMatchConstraintValueEt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEt = "ET" + + // GeoMatchConstraintValueFk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFk = "FK" + + // GeoMatchConstraintValueFo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFo = "FO" + + // GeoMatchConstraintValueFj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFj = "FJ" + + // GeoMatchConstraintValueFi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFi = "FI" + + // GeoMatchConstraintValueFr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFr = "FR" + + // GeoMatchConstraintValueGf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGf = "GF" + + // GeoMatchConstraintValuePf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePf = "PF" + + // GeoMatchConstraintValueTf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTf = "TF" + + // GeoMatchConstraintValueGa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGa = "GA" + + // GeoMatchConstraintValueGm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGm = "GM" + + // GeoMatchConstraintValueGe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGe = "GE" + + // GeoMatchConstraintValueDe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueDe = "DE" + + // GeoMatchConstraintValueGh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGh = "GH" + + // GeoMatchConstraintValueGi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGi = "GI" + + // GeoMatchConstraintValueGr is a GeoMatchConstraintValue enum value + 
GeoMatchConstraintValueGr = "GR" + + // GeoMatchConstraintValueGl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGl = "GL" + + // GeoMatchConstraintValueGd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGd = "GD" + + // GeoMatchConstraintValueGp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGp = "GP" + + // GeoMatchConstraintValueGu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGu = "GU" + + // GeoMatchConstraintValueGt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGt = "GT" + + // GeoMatchConstraintValueGg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGg = "GG" + + // GeoMatchConstraintValueGn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGn = "GN" + + // GeoMatchConstraintValueGw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGw = "GW" + + // GeoMatchConstraintValueGy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGy = "GY" + + // GeoMatchConstraintValueHt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHt = "HT" + + // GeoMatchConstraintValueHm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHm = "HM" + + // GeoMatchConstraintValueVa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVa = "VA" + + // GeoMatchConstraintValueHn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHn = "HN" + + // GeoMatchConstraintValueHk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHk = "HK" + + // GeoMatchConstraintValueHu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueHu = "HU" + + // GeoMatchConstraintValueIs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIs = "IS" + + // GeoMatchConstraintValueIn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIn = "IN" + + // GeoMatchConstraintValueId is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueId = "ID" + + // GeoMatchConstraintValueIr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIr = "IR" + + // GeoMatchConstraintValueIq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIq = "IQ" + + // GeoMatchConstraintValueIe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIe = "IE" + + // GeoMatchConstraintValueIm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIm = "IM" + + // GeoMatchConstraintValueIl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIl = "IL" + + // GeoMatchConstraintValueIt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueIt = "IT" + + // GeoMatchConstraintValueJm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJm = "JM" + + // GeoMatchConstraintValueJp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJp = "JP" + + // GeoMatchConstraintValueJe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJe = "JE" + + // GeoMatchConstraintValueJo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueJo = "JO" + + // GeoMatchConstraintValueKz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKz = "KZ" + + // GeoMatchConstraintValueKe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKe = "KE" + + // GeoMatchConstraintValueKi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKi = "KI" + + // GeoMatchConstraintValueKp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKp = "KP" + + // GeoMatchConstraintValueKr is a GeoMatchConstraintValue 
enum value + GeoMatchConstraintValueKr = "KR" + + // GeoMatchConstraintValueKw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKw = "KW" + + // GeoMatchConstraintValueKg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKg = "KG" + + // GeoMatchConstraintValueLa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLa = "LA" + + // GeoMatchConstraintValueLv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLv = "LV" + + // GeoMatchConstraintValueLb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLb = "LB" + + // GeoMatchConstraintValueLs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLs = "LS" + + // GeoMatchConstraintValueLr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLr = "LR" + + // GeoMatchConstraintValueLy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLy = "LY" + + // GeoMatchConstraintValueLi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLi = "LI" + + // GeoMatchConstraintValueLt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLt = "LT" + + // GeoMatchConstraintValueLu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLu = "LU" + + // GeoMatchConstraintValueMo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMo = "MO" + + // GeoMatchConstraintValueMk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMk = "MK" + + // GeoMatchConstraintValueMg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMg = "MG" + + // GeoMatchConstraintValueMw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMw = "MW" + + // GeoMatchConstraintValueMy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMy = "MY" + + // GeoMatchConstraintValueMv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMv = "MV" + + // GeoMatchConstraintValueMl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMl = "ML" + + // GeoMatchConstraintValueMt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMt = "MT" + + // GeoMatchConstraintValueMh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMh = "MH" + + // GeoMatchConstraintValueMq is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMq = "MQ" + + // GeoMatchConstraintValueMr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMr = "MR" + + // GeoMatchConstraintValueMu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMu = "MU" + + // GeoMatchConstraintValueYt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueYt = "YT" + + // GeoMatchConstraintValueMx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMx = "MX" + + // GeoMatchConstraintValueFm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueFm = "FM" + + // GeoMatchConstraintValueMd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMd = "MD" + + // GeoMatchConstraintValueMc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMc = "MC" + + // GeoMatchConstraintValueMn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMn = "MN" + + // GeoMatchConstraintValueMe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMe = "ME" + + // GeoMatchConstraintValueMs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMs = "MS" + + // GeoMatchConstraintValueMa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMa = "MA" + + // GeoMatchConstraintValueMz is a 
GeoMatchConstraintValue enum value + GeoMatchConstraintValueMz = "MZ" + + // GeoMatchConstraintValueMm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMm = "MM" + + // GeoMatchConstraintValueNa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNa = "NA" + + // GeoMatchConstraintValueNr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNr = "NR" + + // GeoMatchConstraintValueNp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNp = "NP" + + // GeoMatchConstraintValueNl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNl = "NL" + + // GeoMatchConstraintValueNc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNc = "NC" + + // GeoMatchConstraintValueNz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNz = "NZ" + + // GeoMatchConstraintValueNi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNi = "NI" + + // GeoMatchConstraintValueNe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNe = "NE" + + // GeoMatchConstraintValueNg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNg = "NG" + + // GeoMatchConstraintValueNu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNu = "NU" + + // GeoMatchConstraintValueNf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNf = "NF" + + // GeoMatchConstraintValueMp is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMp = "MP" + + // GeoMatchConstraintValueNo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueNo = "NO" + + // GeoMatchConstraintValueOm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueOm = "OM" + + // GeoMatchConstraintValuePk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePk = "PK" + + // GeoMatchConstraintValuePw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePw = "PW" + + // GeoMatchConstraintValuePs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePs = "PS" + + // GeoMatchConstraintValuePa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePa = "PA" + + // GeoMatchConstraintValuePg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePg = "PG" + + // GeoMatchConstraintValuePy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePy = "PY" + + // GeoMatchConstraintValuePe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePe = "PE" + + // GeoMatchConstraintValuePh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePh = "PH" + + // GeoMatchConstraintValuePn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePn = "PN" + + // GeoMatchConstraintValuePl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePl = "PL" + + // GeoMatchConstraintValuePt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePt = "PT" + + // GeoMatchConstraintValuePr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePr = "PR" + + // GeoMatchConstraintValueQa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueQa = "QA" + + // GeoMatchConstraintValueRe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRe = "RE" + + // GeoMatchConstraintValueRo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRo = "RO" + + // GeoMatchConstraintValueRu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRu = "RU" + + // GeoMatchConstraintValueRw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRw = "RW" + + // 
GeoMatchConstraintValueBl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueBl = "BL" + + // GeoMatchConstraintValueSh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSh = "SH" + + // GeoMatchConstraintValueKn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueKn = "KN" + + // GeoMatchConstraintValueLc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLc = "LC" + + // GeoMatchConstraintValueMf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueMf = "MF" + + // GeoMatchConstraintValuePm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValuePm = "PM" - // The ARN (Amazon Resource Name) of the resource to be protected. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` + // GeoMatchConstraintValueVc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVc = "VC" - // A unique identifier (ID) for the web ACL. - // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueWs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueWs = "WS" -// String returns the string representation -func (s AssociateWebACLInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSm = "SM" -// GoString returns the string representation -func (s AssociateWebACLInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSt = "ST" -// Validate inspects the fields of the type to determine if they are valid. -func (s *AssociateWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AssociateWebACLInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - if s.WebACLId == nil { - invalidParams.Add(request.NewErrParamRequired("WebACLId")) - } - if s.WebACLId != nil && len(*s.WebACLId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) - } + // GeoMatchConstraintValueSa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSa = "SA" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueSn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSn = "SN" -// SetResourceArn sets the ResourceArn field's value. -func (s *AssociateWebACLInput) SetResourceArn(v string) *AssociateWebACLInput { - s.ResourceArn = &v - return s -} + // GeoMatchConstraintValueRs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueRs = "RS" -// SetWebACLId sets the WebACLId field's value. 
-func (s *AssociateWebACLInput) SetWebACLId(v string) *AssociateWebACLInput { - s.WebACLId = &v - return s -} + // GeoMatchConstraintValueSc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSc = "SC" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/AssociateWebACLResponse -type AssociateWebACLOutput struct { - _ struct{} `type:"structure"` -} + // GeoMatchConstraintValueSl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSl = "SL" -// String returns the string representation -func (s AssociateWebACLOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSg = "SG" -// GoString returns the string representation -func (s AssociateWebACLOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSx is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSx = "SX" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACLRequest -type DisassociateWebACLInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueSk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSk = "SK" - // The ARN (Amazon Resource Name) of the resource from which the web ACL is - // being removed. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueSi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSi = "SI" -// String returns the string representation -func (s DisassociateWebACLInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSb = "SB" -// GoString returns the string representation -func (s DisassociateWebACLInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSo = "SO" -// Validate inspects the fields of the type to determine if they are valid. -func (s *DisassociateWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisassociateWebACLInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } + // GeoMatchConstraintValueZa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueZa = "ZA" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueGs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGs = "GS" -// SetResourceArn sets the ResourceArn field's value. 
-func (s *DisassociateWebACLInput) SetResourceArn(v string) *DisassociateWebACLInput { - s.ResourceArn = &v - return s -} + // GeoMatchConstraintValueSs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSs = "SS" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DisassociateWebACLResponse -type DisassociateWebACLOutput struct { - _ struct{} `type:"structure"` -} + // GeoMatchConstraintValueEs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEs = "ES" -// String returns the string representation -func (s DisassociateWebACLOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueLk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueLk = "LK" -// GoString returns the string representation -func (s DisassociateWebACLOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSd is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSd = "SD" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResourceRequest -type GetWebACLForResourceInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueSr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSr = "SR" - // The ARN (Amazon Resource Name) of the resource for which to get the web ACL. - // - // ResourceArn is a required field - ResourceArn *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueSj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSj = "SJ" -// String returns the string representation -func (s GetWebACLForResourceInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueSz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSz = "SZ" -// GoString returns the string representation -func (s GetWebACLForResourceInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueSe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSe = "SE" -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetWebACLForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetWebACLForResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } + // GeoMatchConstraintValueCh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueCh = "CH" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueSy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueSy = "SY" -// SetResourceArn sets the ResourceArn field's value. -func (s *GetWebACLForResourceInput) SetResourceArn(v string) *GetWebACLForResourceInput { - s.ResourceArn = &v - return s -} + // GeoMatchConstraintValueTw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTw = "TW" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/GetWebACLForResourceResponse -type GetWebACLForResourceOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueTj is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTj = "TJ" - // Information about the web ACL that you specified in the GetWebACLForResource - // request. If there is no associated resource, a null WebACLSummary is returned. 
- WebACLSummary *waf.WebACLSummary `type:"structure"` -} + // GeoMatchConstraintValueTz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTz = "TZ" -// String returns the string representation -func (s GetWebACLForResourceOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueTh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTh = "TH" -// GoString returns the string representation -func (s GetWebACLForResourceOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueTl is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTl = "TL" -// SetWebACLSummary sets the WebACLSummary field's value. -func (s *GetWebACLForResourceOutput) SetWebACLSummary(v *waf.WebACLSummary) *GetWebACLForResourceOutput { - s.WebACLSummary = v - return s -} + // GeoMatchConstraintValueTg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTg = "TG" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACLRequest -type ListResourcesForWebACLInput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueTk is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTk = "TK" - // The unique identifier (ID) of the web ACL for which to list the associated - // resources. - // - // WebACLId is a required field - WebACLId *string `min:"1" type:"string" required:"true"` -} + // GeoMatchConstraintValueTo is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTo = "TO" -// String returns the string representation -func (s ListResourcesForWebACLInput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueTt is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTt = "TT" -// GoString returns the string representation -func (s ListResourcesForWebACLInput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueTn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTn = "TN" -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListResourcesForWebACLInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListResourcesForWebACLInput"} - if s.WebACLId == nil { - invalidParams.Add(request.NewErrParamRequired("WebACLId")) - } - if s.WebACLId != nil && len(*s.WebACLId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) - } + // GeoMatchConstraintValueTr is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTr = "TR" - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // GeoMatchConstraintValueTm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTm = "TM" -// SetWebACLId sets the WebACLId field's value. -func (s *ListResourcesForWebACLInput) SetWebACLId(v string) *ListResourcesForWebACLInput { - s.WebACLId = &v - return s -} + // GeoMatchConstraintValueTc is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTc = "TC" -// Please also see https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/ListResourcesForWebACLResponse -type ListResourcesForWebACLOutput struct { - _ struct{} `type:"structure"` + // GeoMatchConstraintValueTv is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueTv = "TV" - // An array of ARNs (Amazon Resource Names) of the resources associated with - // the specified web ACL. An array with zero elements is returned if there are - // no resources associated with the web ACL. 
- ResourceArns []*string `type:"list"` -} + // GeoMatchConstraintValueUg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUg = "UG" -// String returns the string representation -func (s ListResourcesForWebACLOutput) String() string { - return awsutil.Prettify(s) -} + // GeoMatchConstraintValueUa is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUa = "UA" -// GoString returns the string representation -func (s ListResourcesForWebACLOutput) GoString() string { - return s.String() -} + // GeoMatchConstraintValueAe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueAe = "AE" -// SetResourceArns sets the ResourceArns field's value. -func (s *ListResourcesForWebACLOutput) SetResourceArns(v []*string) *ListResourcesForWebACLOutput { - s.ResourceArns = v - return s -} + // GeoMatchConstraintValueGb is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueGb = "GB" -const ( - // ChangeActionInsert is a ChangeAction enum value - ChangeActionInsert = "INSERT" + // GeoMatchConstraintValueUs is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUs = "US" - // ChangeActionDelete is a ChangeAction enum value - ChangeActionDelete = "DELETE" -) + // GeoMatchConstraintValueUm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUm = "UM" -const ( - // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value - ChangeTokenStatusProvisioned = "PROVISIONED" + // GeoMatchConstraintValueUy is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUy = "UY" - // ChangeTokenStatusPending is a ChangeTokenStatus enum value - ChangeTokenStatusPending = "PENDING" + // GeoMatchConstraintValueUz is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueUz = "UZ" - // ChangeTokenStatusInsync is a ChangeTokenStatus enum value - ChangeTokenStatusInsync = "INSYNC" -) + // GeoMatchConstraintValueVu is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVu = "VU" -const ( - // ComparisonOperatorEq is a ComparisonOperator enum value - ComparisonOperatorEq = "EQ" + // GeoMatchConstraintValueVe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVe = "VE" - // ComparisonOperatorNe is a ComparisonOperator enum value - ComparisonOperatorNe = "NE" + // GeoMatchConstraintValueVn is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVn = "VN" - // ComparisonOperatorLe is a ComparisonOperator enum value - ComparisonOperatorLe = "LE" + // GeoMatchConstraintValueVg is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVg = "VG" - // ComparisonOperatorLt is a ComparisonOperator enum value - ComparisonOperatorLt = "LT" + // GeoMatchConstraintValueVi is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueVi = "VI" - // ComparisonOperatorGe is a ComparisonOperator enum value - ComparisonOperatorGe = "GE" + // GeoMatchConstraintValueWf is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueWf = "WF" - // ComparisonOperatorGt is a ComparisonOperator enum value - ComparisonOperatorGt = "GT" + // GeoMatchConstraintValueEh is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueEh = "EH" + + // GeoMatchConstraintValueYe is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueYe = "YE" + + // GeoMatchConstraintValueZm is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueZm = "ZM" + + // GeoMatchConstraintValueZw is a GeoMatchConstraintValue enum value + GeoMatchConstraintValueZw = "ZW" ) const ( @@ -6463,6 +9004,12 @@ const ( // 
ParameterExceptionFieldSizeConstraintComparisonOperator is a ParameterExceptionField enum value ParameterExceptionFieldSizeConstraintComparisonOperator = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" + // ParameterExceptionFieldGeoMatchLocationType is a ParameterExceptionField enum value + ParameterExceptionFieldGeoMatchLocationType = "GEO_MATCH_LOCATION_TYPE" + + // ParameterExceptionFieldGeoMatchLocationValue is a ParameterExceptionField enum value + ParameterExceptionFieldGeoMatchLocationValue = "GEO_MATCH_LOCATION_VALUE" + // ParameterExceptionFieldRateKey is a ParameterExceptionField enum value ParameterExceptionFieldRateKey = "RATE_KEY" @@ -6508,11 +9055,17 @@ const ( // PredicateTypeSqlInjectionMatch is a PredicateType enum value PredicateTypeSqlInjectionMatch = "SqlInjectionMatch" + // PredicateTypeGeoMatch is a PredicateType enum value + PredicateTypeGeoMatch = "GeoMatch" + // PredicateTypeSizeConstraint is a PredicateType enum value PredicateTypeSizeConstraint = "SizeConstraint" // PredicateTypeXssMatch is a PredicateType enum value PredicateTypeXssMatch = "XssMatch" + + // PredicateTypeRegexMatch is a PredicateType enum value + PredicateTypeRegexMatch = "RegexMatch" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go index 11577d5b980..fad8a77664a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go @@ -71,7 +71,7 @@ const ( // BLOCK, or COUNT. // // * You tried to update a ByteMatchSet with a FieldToMatchType other than - // HEADER, QUERY_STRING, or URI. + // HEADER, METHOD, QUERY_STRING, URI, or BODY. // // * You tried to update a ByteMatchSet with a Field of HEADER but no value // for Data. @@ -80,6 +80,12 @@ const ( // a resource with which a web ACL cannot be associated. ErrCodeWAFInvalidParameterException = "WAFInvalidParameterException" + // ErrCodeWAFInvalidRegexPatternException for service response error code + // "WAFInvalidRegexPatternException". + // + // The regular expression (regex) you specified in RegexPatternString is invalid. + ErrCodeWAFInvalidRegexPatternException = "WAFInvalidRegexPatternException" + // ErrCodeWAFLimitsExceededException for service response error code // "WAFLimitsExceededException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go new file mode 100644 index 00000000000..017f0dbf55d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -0,0 +1,3620 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opCreateTags = "CreateTags" + +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTags for more information on using the CreateTags +// API call, and error handling. 
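+//
+// A minimal error-inspection sketch, assuming an initialized *WorkSpaces
+// client named client and a *CreateTagsInput named params (both names are
+// placeholders); service errors satisfy awserr.Error, so a type assertion
+// exposes Code and Message:
+//
+//    _, err := client.CreateTags(params)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            fmt.Println(awsErr.Code(), awsErr.Message())
+//        }
+//    }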
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTags +func (c *WorkSpaces) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { + op := &request.Operation{ + Name: opCreateTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTagsInput{} + } + + output = &CreateTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTags API operation for Amazon WorkSpaces. +// +// Creates tags for a WorkSpace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation CreateTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" +// Your resource limits have been exceeded. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTags +func (c *WorkSpaces) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + return out, req.Send() +} + +// CreateTagsWithContext is the same as CreateTags with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) CreateTagsWithContext(ctx aws.Context, input *CreateTagsInput, opts ...request.Option) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateWorkspaces = "CreateWorkspaces" + +// CreateWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the CreateWorkspaces operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWorkspaces for more information on using the CreateWorkspaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWorkspacesRequest method. 
+// req, resp := client.CreateWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspaces +func (c *WorkSpaces) CreateWorkspacesRequest(input *CreateWorkspacesInput) (req *request.Request, output *CreateWorkspacesOutput) { + op := &request.Operation{ + Name: opCreateWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWorkspacesInput{} + } + + output = &CreateWorkspacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWorkspaces API operation for Amazon WorkSpaces. +// +// Creates one or more WorkSpaces. +// +// This operation is asynchronous and returns before the WorkSpaces are created. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation CreateWorkspaces for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceLimitExceededException "ResourceLimitExceededException" +// Your resource limits have been exceeded. +// +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspaces +func (c *WorkSpaces) CreateWorkspaces(input *CreateWorkspacesInput) (*CreateWorkspacesOutput, error) { + req, out := c.CreateWorkspacesRequest(input) + return out, req.Send() +} + +// CreateWorkspacesWithContext is the same as CreateWorkspaces with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWorkspaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) CreateWorkspacesWithContext(ctx aws.Context, input *CreateWorkspacesInput, opts ...request.Option) (*CreateWorkspacesOutput, error) { + req, out := c.CreateWorkspacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTags = "DeleteTags" + +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTags for more information on using the DeleteTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTagsRequest method. 
+// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTags +func (c *WorkSpaces) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { + op := &request.Operation{ + Name: opDeleteTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTagsInput{} + } + + output = &DeleteTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTags API operation for Amazon WorkSpaces. +// +// Deletes tags from a WorkSpace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DeleteTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTags +func (c *WorkSpaces) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + return out, req.Send() +} + +// DeleteTagsWithContext is the same as DeleteTags with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTags for more information on using the DescribeTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTagsRequest method. 
+// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTags +func (c *WorkSpaces) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + output = &DescribeTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTags API operation for Amazon WorkSpaces. +// +// Describes tags for a WorkSpace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTags +func (c *WorkSpaces) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + return out, req.Send() +} + +// DescribeTagsWithContext is the same as DescribeTags with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeWorkspaceBundles = "DescribeWorkspaceBundles" + +// DescribeWorkspaceBundlesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaceBundles operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWorkspaceBundles for more information on using the DescribeWorkspaceBundles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWorkspaceBundlesRequest method. 
+// req, resp := client.DescribeWorkspaceBundlesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundles +func (c *WorkSpaces) DescribeWorkspaceBundlesRequest(input *DescribeWorkspaceBundlesInput) (req *request.Request, output *DescribeWorkspaceBundlesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceBundles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspaceBundlesInput{} + } + + output = &DescribeWorkspaceBundlesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWorkspaceBundles API operation for Amazon WorkSpaces. +// +// Obtains information about the WorkSpace bundles that are available to your +// account in the specified region. +// +// You can filter the results with either the BundleIds parameter, or the Owner +// parameter, but not both. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeWorkspaceBundles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundles +func (c *WorkSpaces) DescribeWorkspaceBundles(input *DescribeWorkspaceBundlesInput) (*DescribeWorkspaceBundlesOutput, error) { + req, out := c.DescribeWorkspaceBundlesRequest(input) + return out, req.Send() +} + +// DescribeWorkspaceBundlesWithContext is the same as DescribeWorkspaceBundles with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWorkspaceBundles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspaceBundlesWithContext(ctx aws.Context, input *DescribeWorkspaceBundlesInput, opts ...request.Option) (*DescribeWorkspaceBundlesOutput, error) { + req, out := c.DescribeWorkspaceBundlesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeWorkspaceBundlesPages iterates over the pages of a DescribeWorkspaceBundles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWorkspaceBundles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
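+//
+// A related sketch, assuming an initialized client: page through only the
+// Amazon-provided bundles by combining the Owner filter with the paginator
+// (the Owner and BundleIds filters are mutually exclusive):
+//
+//    input := &DescribeWorkspaceBundlesInput{Owner: aws.String("AMAZON")}
+//    err := client.DescribeWorkspaceBundlesPages(input,
+//        func(page *DescribeWorkspaceBundlesOutput, lastPage bool) bool {
+//            fmt.Println(len(page.Bundles))
+//            return true // keep paging until the last page
+//        })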
+// +// // Example iterating over at most 3 pages of a DescribeWorkspaceBundles operation. +// pageNum := 0 +// err := client.DescribeWorkspaceBundlesPages(params, +// func(page *DescribeWorkspaceBundlesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkSpaces) DescribeWorkspaceBundlesPages(input *DescribeWorkspaceBundlesInput, fn func(*DescribeWorkspaceBundlesOutput, bool) bool) error { + return c.DescribeWorkspaceBundlesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeWorkspaceBundlesPagesWithContext same as DescribeWorkspaceBundlesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspaceBundlesPagesWithContext(ctx aws.Context, input *DescribeWorkspaceBundlesInput, fn func(*DescribeWorkspaceBundlesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeWorkspaceBundlesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeWorkspaceBundlesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeWorkspaceBundlesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeWorkspaceDirectories = "DescribeWorkspaceDirectories" + +// DescribeWorkspaceDirectoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaceDirectories operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWorkspaceDirectories for more information on using the DescribeWorkspaceDirectories +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWorkspaceDirectoriesRequest method. +// req, resp := client.DescribeWorkspaceDirectoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectories +func (c *WorkSpaces) DescribeWorkspaceDirectoriesRequest(input *DescribeWorkspaceDirectoriesInput) (req *request.Request, output *DescribeWorkspaceDirectoriesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceDirectories, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspaceDirectoriesInput{} + } + + output = &DescribeWorkspaceDirectoriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWorkspaceDirectories API operation for Amazon WorkSpaces. 
+// +// Retrieves information about the AWS Directory Service directories in the +// region that are registered with Amazon WorkSpaces and are available to your +// account. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeWorkspaceDirectories for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectories +func (c *WorkSpaces) DescribeWorkspaceDirectories(input *DescribeWorkspaceDirectoriesInput) (*DescribeWorkspaceDirectoriesOutput, error) { + req, out := c.DescribeWorkspaceDirectoriesRequest(input) + return out, req.Send() +} + +// DescribeWorkspaceDirectoriesWithContext is the same as DescribeWorkspaceDirectories with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWorkspaceDirectories for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspaceDirectoriesWithContext(ctx aws.Context, input *DescribeWorkspaceDirectoriesInput, opts ...request.Option) (*DescribeWorkspaceDirectoriesOutput, error) { + req, out := c.DescribeWorkspaceDirectoriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeWorkspaceDirectoriesPages iterates over the pages of a DescribeWorkspaceDirectories operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWorkspaceDirectories method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeWorkspaceDirectories operation. +// pageNum := 0 +// err := client.DescribeWorkspaceDirectoriesPages(params, +// func(page *DescribeWorkspaceDirectoriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkSpaces) DescribeWorkspaceDirectoriesPages(input *DescribeWorkspaceDirectoriesInput, fn func(*DescribeWorkspaceDirectoriesOutput, bool) bool) error { + return c.DescribeWorkspaceDirectoriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeWorkspaceDirectoriesPagesWithContext same as DescribeWorkspaceDirectoriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspaceDirectoriesPagesWithContext(ctx aws.Context, input *DescribeWorkspaceDirectoriesInput, fn func(*DescribeWorkspaceDirectoriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeWorkspaceDirectoriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeWorkspaceDirectoriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeWorkspaceDirectoriesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeWorkspaces = "DescribeWorkspaces" + +// DescribeWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaces operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWorkspaces for more information on using the DescribeWorkspaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWorkspacesRequest method. +// req, resp := client.DescribeWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaces +func (c *WorkSpaces) DescribeWorkspacesRequest(input *DescribeWorkspacesInput) (req *request.Request, output *DescribeWorkspacesOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeWorkspacesInput{} + } + + output = &DescribeWorkspacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWorkspaces API operation for Amazon WorkSpaces. +// +// Obtains information about the specified WorkSpaces. +// +// Only one of the filter parameters, such as BundleId, DirectoryId, or WorkspaceIds, +// can be specified at a time. +// +// This operation supports pagination with the use of the NextToken request +// and response parameters. If more results are available, the NextToken response +// member contains a token that you pass in the next call to this operation +// to retrieve the next set of items. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeWorkspaces for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// * ErrCodeResourceUnavailableException "ResourceUnavailableException" +// The specified resource is not available. 
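+//
+// A minimal manual-pagination sketch, assuming an initialized client: feed
+// the returned NextToken back into the request until the service omits it.
+//
+//    input := &DescribeWorkspacesInput{}
+//    for {
+//        out, err := client.DescribeWorkspaces(input)
+//        if err != nil {
+//            break
+//        }
+//        fmt.Println(len(out.Workspaces))
+//        if out.NextToken == nil {
+//            break // last page reached
+//        }
+//        input.NextToken = out.NextToken
+//    }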
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaces +func (c *WorkSpaces) DescribeWorkspaces(input *DescribeWorkspacesInput) (*DescribeWorkspacesOutput, error) { + req, out := c.DescribeWorkspacesRequest(input) + return out, req.Send() +} + +// DescribeWorkspacesWithContext is the same as DescribeWorkspaces with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWorkspaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspacesWithContext(ctx aws.Context, input *DescribeWorkspacesInput, opts ...request.Option) (*DescribeWorkspacesOutput, error) { + req, out := c.DescribeWorkspacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeWorkspacesPages iterates over the pages of a DescribeWorkspaces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWorkspaces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeWorkspaces operation. +// pageNum := 0 +// err := client.DescribeWorkspacesPages(params, +// func(page *DescribeWorkspacesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkSpaces) DescribeWorkspacesPages(input *DescribeWorkspacesInput, fn func(*DescribeWorkspacesOutput, bool) bool) error { + return c.DescribeWorkspacesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeWorkspacesPagesWithContext same as DescribeWorkspacesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspacesPagesWithContext(ctx aws.Context, input *DescribeWorkspacesInput, fn func(*DescribeWorkspacesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeWorkspacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeWorkspacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeWorkspacesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeWorkspacesConnectionStatus = "DescribeWorkspacesConnectionStatus" + +// DescribeWorkspacesConnectionStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspacesConnectionStatus operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWorkspacesConnectionStatus for more information on using the DescribeWorkspacesConnectionStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWorkspacesConnectionStatusRequest method. +// req, resp := client.DescribeWorkspacesConnectionStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatus +func (c *WorkSpaces) DescribeWorkspacesConnectionStatusRequest(input *DescribeWorkspacesConnectionStatusInput) (req *request.Request, output *DescribeWorkspacesConnectionStatusOutput) { + op := &request.Operation{ + Name: opDescribeWorkspacesConnectionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkspacesConnectionStatusInput{} + } + + output = &DescribeWorkspacesConnectionStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWorkspacesConnectionStatus API operation for Amazon WorkSpaces. +// +// Describes the connection status of a specified WorkSpace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeWorkspacesConnectionStatus for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatus +func (c *WorkSpaces) DescribeWorkspacesConnectionStatus(input *DescribeWorkspacesConnectionStatusInput) (*DescribeWorkspacesConnectionStatusOutput, error) { + req, out := c.DescribeWorkspacesConnectionStatusRequest(input) + return out, req.Send() +} + +// DescribeWorkspacesConnectionStatusWithContext is the same as DescribeWorkspacesConnectionStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWorkspacesConnectionStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspacesConnectionStatusWithContext(ctx aws.Context, input *DescribeWorkspacesConnectionStatusInput, opts ...request.Option) (*DescribeWorkspacesConnectionStatusOutput, error) { + req, out := c.DescribeWorkspacesConnectionStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyWorkspaceProperties = "ModifyWorkspaceProperties" + +// ModifyWorkspacePropertiesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyWorkspaceProperties operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyWorkspaceProperties for more information on using the ModifyWorkspaceProperties +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyWorkspacePropertiesRequest method. +// req, resp := client.ModifyWorkspacePropertiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceProperties +func (c *WorkSpaces) ModifyWorkspacePropertiesRequest(input *ModifyWorkspacePropertiesInput) (req *request.Request, output *ModifyWorkspacePropertiesOutput) { + op := &request.Operation{ + Name: opModifyWorkspaceProperties, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyWorkspacePropertiesInput{} + } + + output = &ModifyWorkspacePropertiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyWorkspaceProperties API operation for Amazon WorkSpaces. +// +// Modifies the WorkSpace properties, including the running mode and AutoStop +// time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation ModifyWorkspaceProperties for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidParameterValuesException "InvalidParameterValuesException" +// One or more parameter values are not valid. +// +// * ErrCodeInvalidResourceStateException "InvalidResourceStateException" +// The state of the WorkSpace is not valid for this operation. +// +// * ErrCodeOperationInProgressException "OperationInProgressException" +// The properties of this WorkSpace are currently being modified. Try again +// in a moment. +// +// * ErrCodeUnsupportedWorkspaceConfigurationException "UnsupportedWorkspaceConfigurationException" +// The configuration of this WorkSpace is not supported for this operation. +// For more information, see the Amazon WorkSpaces Administration Guide (http://docs.aws.amazon.com/workspaces/latest/adminguide/). +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The resource could not be found. +// +// * ErrCodeAccessDeniedException "AccessDeniedException" +// The user is not authorized to access a resource. +// +// * ErrCodeResourceUnavailableException "ResourceUnavailableException" +// The specified resource is not available. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceProperties +func (c *WorkSpaces) ModifyWorkspaceProperties(input *ModifyWorkspacePropertiesInput) (*ModifyWorkspacePropertiesOutput, error) { + req, out := c.ModifyWorkspacePropertiesRequest(input) + return out, req.Send() +} + +// ModifyWorkspacePropertiesWithContext is the same as ModifyWorkspaceProperties with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyWorkspaceProperties for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WorkSpaces) ModifyWorkspacePropertiesWithContext(ctx aws.Context, input *ModifyWorkspacePropertiesInput, opts ...request.Option) (*ModifyWorkspacePropertiesOutput, error) {
+	req, out := c.ModifyWorkspacePropertiesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opRebootWorkspaces = "RebootWorkspaces"
+
+// RebootWorkspacesRequest generates a "aws/request.Request" representing the
+// client's request for the RebootWorkspaces operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RebootWorkspaces for more information on using the RebootWorkspaces
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the RebootWorkspacesRequest method.
+//    req, resp := client.RebootWorkspacesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspaces
+func (c *WorkSpaces) RebootWorkspacesRequest(input *RebootWorkspacesInput) (req *request.Request, output *RebootWorkspacesOutput) {
+	op := &request.Operation{
+		Name:       opRebootWorkspaces,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RebootWorkspacesInput{}
+	}
+
+	output = &RebootWorkspacesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RebootWorkspaces API operation for Amazon WorkSpaces.
+//
+// Reboots the specified WorkSpaces.
+//
+// To be able to reboot a WorkSpace, the WorkSpace must have a State of AVAILABLE,
+// IMPAIRED, or INOPERABLE.
+//
+// This operation is asynchronous and returns before the WorkSpaces have rebooted.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon WorkSpaces's
+// API operation RebootWorkspaces for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspaces
+func (c *WorkSpaces) RebootWorkspaces(input *RebootWorkspacesInput) (*RebootWorkspacesOutput, error) {
+	req, out := c.RebootWorkspacesRequest(input)
+	return out, req.Send()
+}
+
+// RebootWorkspacesWithContext is the same as RebootWorkspaces with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RebootWorkspaces for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
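+//
+// A minimal sketch, assuming an initialized client and a prepared
+// *RebootWorkspacesInput named input: bound the call with a 30-second
+// timeout so a stalled request is cancelled automatically.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    resp, err := client.RebootWorkspacesWithContext(ctx, input)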
+func (c *WorkSpaces) RebootWorkspacesWithContext(ctx aws.Context, input *RebootWorkspacesInput, opts ...request.Option) (*RebootWorkspacesOutput, error) { + req, out := c.RebootWorkspacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRebuildWorkspaces = "RebuildWorkspaces" + +// RebuildWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the RebuildWorkspaces operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RebuildWorkspaces for more information on using the RebuildWorkspaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RebuildWorkspacesRequest method. +// req, resp := client.RebuildWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspaces +func (c *WorkSpaces) RebuildWorkspacesRequest(input *RebuildWorkspacesInput) (req *request.Request, output *RebuildWorkspacesOutput) { + op := &request.Operation{ + Name: opRebuildWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebuildWorkspacesInput{} + } + + output = &RebuildWorkspacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// RebuildWorkspaces API operation for Amazon WorkSpaces. +// +// Rebuilds the specified WorkSpaces. +// +// Rebuilding a WorkSpace is a potentially destructive action that can result +// in the loss of data. Rebuilding a WorkSpace causes the following to occur: +// +// * The system is restored to the image of the bundle that the WorkSpace +// is created from. Any applications that have been installed, or system +// settings that have been made since the WorkSpace was created will be lost. +// +// * The data drive (D drive) is re-created from the last automatic snapshot +// taken of the data drive. The current contents of the data drive are overwritten. +// Automatic snapshots of the data drive are taken every 12 hours, so the +// snapshot can be as much as 12 hours old. +// +// To be able to rebuild a WorkSpace, the WorkSpace must have a State of AVAILABLE +// or ERROR. +// +// This operation is asynchronous and returns before the WorkSpaces have been +// completely rebuilt. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation RebuildWorkspaces for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspaces +func (c *WorkSpaces) RebuildWorkspaces(input *RebuildWorkspacesInput) (*RebuildWorkspacesOutput, error) { + req, out := c.RebuildWorkspacesRequest(input) + return out, req.Send() +} + +// RebuildWorkspacesWithContext is the same as RebuildWorkspaces with the addition of +// the ability to pass a context and additional request options. 
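+//
+// A minimal sketch, assuming an initialized client, a non-nil context ctx,
+// and a hypothetical WorkSpace ID; because a rebuild is destructive, each
+// target is listed explicitly in its own RebuildRequest entry.
+//
+//    input := &RebuildWorkspacesInput{
+//        RebuildWorkspaceRequests: []*RebuildRequest{
+//            {WorkspaceId: aws.String("ws-0123456789")}, // hypothetical ID
+//        },
+//    }
+//    resp, err := client.RebuildWorkspacesWithContext(ctx, input)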
+// +// See RebuildWorkspaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) RebuildWorkspacesWithContext(ctx aws.Context, input *RebuildWorkspacesInput, opts ...request.Option) (*RebuildWorkspacesOutput, error) { + req, out := c.RebuildWorkspacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartWorkspaces = "StartWorkspaces" + +// StartWorkspacesRequest generates a "aws/request.Request" representing the +// client's request for the StartWorkspaces operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartWorkspaces for more information on using the StartWorkspaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartWorkspacesRequest method. +// req, resp := client.StartWorkspacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspaces +func (c *WorkSpaces) StartWorkspacesRequest(input *StartWorkspacesInput) (req *request.Request, output *StartWorkspacesOutput) { + op := &request.Operation{ + Name: opStartWorkspaces, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartWorkspacesInput{} + } + + output = &StartWorkspacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartWorkspaces API operation for Amazon WorkSpaces. +// +// Starts the specified WorkSpaces. The WorkSpaces must have a running mode +// of AutoStop and a state of STOPPED. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation StartWorkspaces for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspaces +func (c *WorkSpaces) StartWorkspaces(input *StartWorkspacesInput) (*StartWorkspacesOutput, error) { + req, out := c.StartWorkspacesRequest(input) + return out, req.Send() +} + +// StartWorkspacesWithContext is the same as StartWorkspaces with the addition of +// the ability to pass a context and additional request options. +// +// See StartWorkspaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
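+//
+// A minimal sketch, assuming an initialized client; the WorkSpace ID is
+// hypothetical. Each STOPPED AutoStop WorkSpace to start gets its own
+// StartRequest entry.
+//
+//    input := &StartWorkspacesInput{
+//        StartWorkspaceRequests: []*StartRequest{
+//            {WorkspaceId: aws.String("ws-0123456789")}, // hypothetical ID
+//        },
+//    }
+//    resp, err := client.StartWorkspaces(input)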
+const opStopWorkspaces = "StopWorkspaces"
+
+// StopWorkspacesRequest generates a "aws/request.Request" representing the
+// client's request for the StopWorkspaces operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See StopWorkspaces for more information on using the StopWorkspaces
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//    // Example sending a request using the StopWorkspacesRequest method.
+//    req, resp := client.StopWorkspacesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspaces
+func (c *WorkSpaces) StopWorkspacesRequest(input *StopWorkspacesInput) (req *request.Request, output *StopWorkspacesOutput) {
+	op := &request.Operation{
+		Name:       opStopWorkspaces,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StopWorkspacesInput{}
+	}
+
+	output = &StopWorkspacesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// StopWorkspaces API operation for Amazon WorkSpaces.
+//
+// Stops the specified WorkSpaces. The WorkSpaces must have a running mode of
+// AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon WorkSpaces's
+// API operation StopWorkspaces for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspaces
+func (c *WorkSpaces) StopWorkspaces(input *StopWorkspacesInput) (*StopWorkspacesOutput, error) {
+	req, out := c.StopWorkspacesRequest(input)
+	return out, req.Send()
+}
+
+// StopWorkspacesWithContext is the same as StopWorkspaces with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StopWorkspaces for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WorkSpaces) StopWorkspacesWithContext(ctx aws.Context, input *StopWorkspacesInput, opts ...request.Option) (*StopWorkspacesOutput, error) {
+	req, out := c.StopWorkspacesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
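+
+// NOTE (editor's addition, not generated SDK code): a sketch of stopping an
+// AutoStop WorkSpace with a deadline via the WithContext variant; a standard
+// context.Context satisfies aws.Context, and the ID is a placeholder:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//
+//    _, err := client.StopWorkspacesWithContext(ctx, &workspaces.StopWorkspacesInput{
+//        StopWorkspaceRequests: []*workspaces.StopRequest{
+//            {WorkspaceId: aws.String("ws-0123456789")},
+//        },
+//    })
+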
+const opTerminateWorkspaces = "TerminateWorkspaces"
+
+// TerminateWorkspacesRequest generates a "aws/request.Request" representing the
+// client's request for the TerminateWorkspaces operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See TerminateWorkspaces for more information on using the TerminateWorkspaces
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//    // Example sending a request using the TerminateWorkspacesRequest method.
+//    req, resp := client.TerminateWorkspacesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspaces
+func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) (req *request.Request, output *TerminateWorkspacesOutput) {
+	op := &request.Operation{
+		Name:       opTerminateWorkspaces,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &TerminateWorkspacesInput{}
+	}
+
+	output = &TerminateWorkspacesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// TerminateWorkspaces API operation for Amazon WorkSpaces.
+//
+// Terminates the specified WorkSpaces.
+//
+// Terminating a WorkSpace is a permanent action and cannot be undone. The user's
+// data is not maintained and will be destroyed. If you need to archive any
+// user data, contact Amazon Web Services before terminating the WorkSpace.
+//
+// You can terminate a WorkSpace that is in any state except SUSPENDED.
+//
+// This operation is asynchronous and returns before the WorkSpaces have been
+// completely terminated.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon WorkSpaces's
+// API operation TerminateWorkspaces for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspaces
+func (c *WorkSpaces) TerminateWorkspaces(input *TerminateWorkspacesInput) (*TerminateWorkspacesOutput, error) {
+	req, out := c.TerminateWorkspacesRequest(input)
+	return out, req.Send()
+}
+
+// TerminateWorkspacesWithContext is the same as TerminateWorkspaces with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TerminateWorkspaces for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *WorkSpaces) TerminateWorkspacesWithContext(ctx aws.Context, input *TerminateWorkspacesInput, opts ...request.Option) (*TerminateWorkspacesOutput, error) {
+	req, out := c.TerminateWorkspacesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Contains information about the compute type of a WorkSpace bundle.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ComputeType
+type ComputeType struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the compute type for the bundle.
+ Name *string `type:"string" enum:"Compute"` +} + +// String returns the string representation +func (s ComputeType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComputeType) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ComputeType) SetName(v string) *ComputeType { + s.Name = &v + return s +} + +// The request of the CreateTags operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsRequest +type CreateTagsInput struct { + _ struct{} `type:"structure"` + + // The resource ID of the request. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The tags of the request. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *CreateTagsInput) SetResourceId(v string) *CreateTagsInput { + s.ResourceId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTagsInput) SetTags(v []*Tag) *CreateTagsInput { + s.Tags = v + return s +} + +// The result of the CreateTags operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateTagsResult +type CreateTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTagsOutput) GoString() string { + return s.String() +} + +// Contains the inputs for the CreateWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesRequest +type CreateWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to create. + // + // Workspaces is a required field + Workspaces []*WorkspaceRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
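+//
+// NOTE (editor's addition, not generated SDK code): the SDK runs Validate before
+// sending a request, but it can also be invoked directly, e.g.:
+//
+//    input := &workspaces.CreateWorkspacesInput{}
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // reports the missing required Workspaces field
+//    }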
+func (s *CreateWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWorkspacesInput"} + if s.Workspaces == nil { + invalidParams.Add(request.NewErrParamRequired("Workspaces")) + } + if s.Workspaces != nil && len(s.Workspaces) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Workspaces", 1)) + } + if s.Workspaces != nil { + for i, v := range s.Workspaces { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Workspaces", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaces sets the Workspaces field's value. +func (s *CreateWorkspacesInput) SetWorkspaces(v []*WorkspaceRequest) *CreateWorkspacesInput { + s.Workspaces = v + return s +} + +// Contains the result of the CreateWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateWorkspacesResult +type CreateWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that represent the WorkSpaces that could not be created. + FailedRequests []*FailedCreateWorkspaceRequest `type:"list"` + + // An array of structures that represent the WorkSpaces that were created. + // + // Because this operation is asynchronous, the identifier in WorkspaceId is + // not immediately available. If you immediately call DescribeWorkspaces with + // this identifier, no information will be returned. + PendingRequests []*Workspace `type:"list"` +} + +// String returns the string representation +func (s CreateWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkspacesOutput) GoString() string { + return s.String() +} + +// SetFailedRequests sets the FailedRequests field's value. +func (s *CreateWorkspacesOutput) SetFailedRequests(v []*FailedCreateWorkspaceRequest) *CreateWorkspacesOutput { + s.FailedRequests = v + return s +} + +// SetPendingRequests sets the PendingRequests field's value. +func (s *CreateWorkspacesOutput) SetPendingRequests(v []*Workspace) *CreateWorkspacesOutput { + s.PendingRequests = v + return s +} + +// Contains default WorkSpace creation information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DefaultWorkspaceCreationProperties +type DefaultWorkspaceCreationProperties struct { + _ struct{} `type:"structure"` + + // The identifier of any custom security groups that are applied to the WorkSpaces + // when they are created. + CustomSecurityGroupId *string `type:"string"` + + // The organizational unit (OU) in the directory that the WorkSpace machine + // accounts are placed in. + DefaultOu *string `type:"string"` + + // A public IP address will be attached to all WorkSpaces that are created or + // rebuilt. + EnableInternetAccess *bool `type:"boolean"` + + // Specifies if the directory is enabled for Amazon WorkDocs. + EnableWorkDocs *bool `type:"boolean"` + + // The WorkSpace user is an administrator on the WorkSpace. + UserEnabledAsLocalAdministrator *bool `type:"boolean"` +} + +// String returns the string representation +func (s DefaultWorkspaceCreationProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultWorkspaceCreationProperties) GoString() string { + return s.String() +} + +// SetCustomSecurityGroupId sets the CustomSecurityGroupId field's value. 
+func (s *DefaultWorkspaceCreationProperties) SetCustomSecurityGroupId(v string) *DefaultWorkspaceCreationProperties { + s.CustomSecurityGroupId = &v + return s +} + +// SetDefaultOu sets the DefaultOu field's value. +func (s *DefaultWorkspaceCreationProperties) SetDefaultOu(v string) *DefaultWorkspaceCreationProperties { + s.DefaultOu = &v + return s +} + +// SetEnableInternetAccess sets the EnableInternetAccess field's value. +func (s *DefaultWorkspaceCreationProperties) SetEnableInternetAccess(v bool) *DefaultWorkspaceCreationProperties { + s.EnableInternetAccess = &v + return s +} + +// SetEnableWorkDocs sets the EnableWorkDocs field's value. +func (s *DefaultWorkspaceCreationProperties) SetEnableWorkDocs(v bool) *DefaultWorkspaceCreationProperties { + s.EnableWorkDocs = &v + return s +} + +// SetUserEnabledAsLocalAdministrator sets the UserEnabledAsLocalAdministrator field's value. +func (s *DefaultWorkspaceCreationProperties) SetUserEnabledAsLocalAdministrator(v bool) *DefaultWorkspaceCreationProperties { + s.UserEnabledAsLocalAdministrator = &v + return s +} + +// The request of the DeleteTags operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsRequest +type DeleteTagsInput struct { + _ struct{} `type:"structure"` + + // The resource ID of the request. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The tag keys of the request. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeleteTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceId sets the ResourceId field's value. +func (s *DeleteTagsInput) SetResourceId(v string) *DeleteTagsInput { + s.ResourceId = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *DeleteTagsInput) SetTagKeys(v []*string) *DeleteTagsInput { + s.TagKeys = v + return s +} + +// The result of the DeleteTags operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteTagsResult +type DeleteTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +// The request of the DescribeTags operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsRequest +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The resource ID of the request. 
+	//
+	// ResourceId is a required field
+	ResourceId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTagsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTagsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"}
+	if s.ResourceId == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceId"))
+	}
+	if s.ResourceId != nil && len(*s.ResourceId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetResourceId sets the ResourceId field's value.
+func (s *DescribeTagsInput) SetResourceId(v string) *DescribeTagsInput {
+	s.ResourceId = &v
+	return s
+}
+
+// The result of the DescribeTags operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeTagsResult
+type DescribeTagsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The list of tags.
+	TagList []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeTagsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTagsOutput) GoString() string {
+	return s.String()
+}
+
+// SetTagList sets the TagList field's value.
+func (s *DescribeTagsOutput) SetTagList(v []*Tag) *DescribeTagsOutput {
+	s.TagList = v
+	return s
+}
+
+// Contains the inputs for the DescribeWorkspaceBundles operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesRequest
+type DescribeWorkspaceBundlesInput struct {
+	_ struct{} `type:"structure"`
+
+	// An array of strings that contains the identifiers of the bundles to retrieve.
+	// This parameter cannot be combined with any other filter parameter.
+	BundleIds []*string `min:"1" type:"list"`
+
+	// The NextToken value from a previous call to this operation. Pass null if
+	// this is the first call.
+	NextToken *string `min:"1" type:"string"`
+
+	// The owner of the bundles to retrieve. This parameter cannot be combined with
+	// any other filter parameter.
+	//
+	// This contains one of the following values:
+	//
+	//    * null - Retrieves the bundles that belong to the account making the call.
+	//
+	//    * AMAZON - Retrieves the bundles that are provided by AWS.
+	Owner *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeWorkspaceBundlesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeWorkspaceBundlesInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeWorkspaceBundlesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceBundlesInput"}
+	if s.BundleIds != nil && len(s.BundleIds) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("BundleIds", 1))
+	}
+	if s.NextToken != nil && len(*s.NextToken) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBundleIds sets the BundleIds field's value.
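+//
+// NOTE (editor's addition, not generated SDK code): the setters return the
+// receiver, so inputs can be built fluently; Owner cannot be combined with the
+// other filter parameters:
+//
+//    input := (&workspaces.DescribeWorkspaceBundlesInput{}).SetOwner("AMAZON")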
+func (s *DescribeWorkspaceBundlesInput) SetBundleIds(v []*string) *DescribeWorkspaceBundlesInput { + s.BundleIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspaceBundlesInput) SetNextToken(v string) *DescribeWorkspaceBundlesInput { + s.NextToken = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *DescribeWorkspaceBundlesInput) SetOwner(v string) *DescribeWorkspaceBundlesInput { + s.Owner = &v + return s +} + +// Contains the results of the DescribeWorkspaceBundles operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceBundlesResult +type DescribeWorkspaceBundlesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the bundles. + Bundles []*WorkspaceBundle `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that time + // frame. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceBundlesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceBundlesOutput) GoString() string { + return s.String() +} + +// SetBundles sets the Bundles field's value. +func (s *DescribeWorkspaceBundlesOutput) SetBundles(v []*WorkspaceBundle) *DescribeWorkspaceBundlesOutput { + s.Bundles = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspaceBundlesOutput) SetNextToken(v string) *DescribeWorkspaceBundlesOutput { + s.NextToken = &v + return s +} + +// Contains the inputs for the DescribeWorkspaceDirectories operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesRequest +type DescribeWorkspaceDirectoriesInput struct { + _ struct{} `type:"structure"` + + // An array of strings that contains the directory identifiers to retrieve information + // for. If this member is null, all directories are retrieved. + DirectoryIds []*string `min:"1" type:"list"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceDirectoriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceDirectoriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWorkspaceDirectoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceDirectoriesInput"} + if s.DirectoryIds != nil && len(s.DirectoryIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DirectoryIds", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryIds sets the DirectoryIds field's value. +func (s *DescribeWorkspaceDirectoriesInput) SetDirectoryIds(v []*string) *DescribeWorkspaceDirectoriesInput { + s.DirectoryIds = v + return s +} + +// SetNextToken sets the NextToken field's value. 
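+//
+// NOTE (editor's addition, not generated SDK code): NextToken drives manual
+// pagination; a sketch of draining all pages, assuming a configured client:
+//
+//    input := &workspaces.DescribeWorkspaceDirectoriesInput{}
+//    for {
+//        out, err := client.DescribeWorkspaceDirectories(input)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, d := range out.Directories {
+//            fmt.Println(aws.StringValue(d.DirectoryId))
+//        }
+//        if out.NextToken == nil {
+//            break
+//        }
+//        input.NextToken = out.NextToken
+//    }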
+func (s *DescribeWorkspaceDirectoriesInput) SetNextToken(v string) *DescribeWorkspaceDirectoriesInput { + s.NextToken = &v + return s +} + +// Contains the results of the DescribeWorkspaceDirectories operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceDirectoriesResult +type DescribeWorkspaceDirectoriesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures that contain information about the directories. + Directories []*WorkspaceDirectory `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that time + // frame. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceDirectoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceDirectoriesOutput) GoString() string { + return s.String() +} + +// SetDirectories sets the Directories field's value. +func (s *DescribeWorkspaceDirectoriesOutput) SetDirectories(v []*WorkspaceDirectory) *DescribeWorkspaceDirectoriesOutput { + s.Directories = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspaceDirectoriesOutput) SetNextToken(v string) *DescribeWorkspaceDirectoriesOutput { + s.NextToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatusRequest +type DescribeWorkspacesConnectionStatusInput struct { + _ struct{} `type:"structure"` + + // The next token of the request. + NextToken *string `min:"1" type:"string"` + + // An array of strings that contain the identifiers of the WorkSpaces. + WorkspaceIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesConnectionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesConnectionStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWorkspacesConnectionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspacesConnectionStatusInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.WorkspaceIds != nil && len(s.WorkspaceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkspaceIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspacesConnectionStatusInput) SetNextToken(v string) *DescribeWorkspacesConnectionStatusInput { + s.NextToken = &v + return s +} + +// SetWorkspaceIds sets the WorkspaceIds field's value. +func (s *DescribeWorkspacesConnectionStatusInput) SetWorkspaceIds(v []*string) *DescribeWorkspacesConnectionStatusInput { + s.WorkspaceIds = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesConnectionStatusResult +type DescribeWorkspacesConnectionStatusOutput struct { + _ struct{} `type:"structure"` + + // The next token of the result. 
+ NextToken *string `min:"1" type:"string"` + + // The connection status of the WorkSpace. + WorkspacesConnectionStatus []*WorkspaceConnectionStatus `type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesConnectionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesConnectionStatusOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspacesConnectionStatusOutput) SetNextToken(v string) *DescribeWorkspacesConnectionStatusOutput { + s.NextToken = &v + return s +} + +// SetWorkspacesConnectionStatus sets the WorkspacesConnectionStatus field's value. +func (s *DescribeWorkspacesConnectionStatusOutput) SetWorkspacesConnectionStatus(v []*WorkspaceConnectionStatus) *DescribeWorkspacesConnectionStatusOutput { + s.WorkspacesConnectionStatus = v + return s +} + +// Contains the inputs for the DescribeWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesRequest +type DescribeWorkspacesInput struct { + _ struct{} `type:"structure"` + + // The identifier of a bundle to obtain the WorkSpaces for. All WorkSpaces that + // are created from this bundle will be retrieved. This parameter cannot be + // combined with any other filter parameter. + BundleId *string `type:"string"` + + // Specifies the directory identifier to which to limit the WorkSpaces. Optionally, + // you can specify a specific directory user with the UserName parameter. This + // parameter cannot be combined with any other filter parameter. + DirectoryId *string `type:"string"` + + // The maximum number of items to return. + Limit *int64 `min:"1" type:"integer"` + + // The NextToken value from a previous call to this operation. Pass null if + // this is the first call. + NextToken *string `min:"1" type:"string"` + + // Used with the DirectoryId parameter to specify the directory user for whom + // to obtain the WorkSpace. + UserName *string `min:"1" type:"string"` + + // An array of strings that contain the identifiers of the WorkSpaces for which + // to retrieve information. This parameter cannot be combined with any other + // filter parameter. + // + // Because the CreateWorkspaces operation is asynchronous, the identifier it + // returns is not immediately available. If you immediately call DescribeWorkspaces + // with this identifier, no information is returned. + WorkspaceIds []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
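+//
+// NOTE (editor's addition, not generated SDK code): the filter parameters are
+// mutually exclusive, except that UserName refines DirectoryId; a sketch with
+// placeholder identifiers:
+//
+//    out, err := client.DescribeWorkspaces(&workspaces.DescribeWorkspacesInput{
+//        DirectoryId: aws.String("d-0123456789"),
+//        UserName:    aws.String("alice"),
+//    })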
+func (s *DescribeWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspacesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.WorkspaceIds != nil && len(s.WorkspaceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkspaceIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBundleId sets the BundleId field's value. +func (s *DescribeWorkspacesInput) SetBundleId(v string) *DescribeWorkspacesInput { + s.BundleId = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *DescribeWorkspacesInput) SetDirectoryId(v string) *DescribeWorkspacesInput { + s.DirectoryId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeWorkspacesInput) SetLimit(v int64) *DescribeWorkspacesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspacesInput) SetNextToken(v string) *DescribeWorkspacesInput { + s.NextToken = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DescribeWorkspacesInput) SetUserName(v string) *DescribeWorkspacesInput { + s.UserName = &v + return s +} + +// SetWorkspaceIds sets the WorkspaceIds field's value. +func (s *DescribeWorkspacesInput) SetWorkspaceIds(v []*string) *DescribeWorkspacesInput { + s.WorkspaceIds = v + return s +} + +// Contains the results for the DescribeWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspacesResult +type DescribeWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to this operation to retrieve the next set + // of items. This token is valid for one day and must be used within that time + // frame. + NextToken *string `min:"1" type:"string"` + + // An array of structures that contain the information about the WorkSpaces. + // + // Because the CreateWorkspaces operation is asynchronous, some of this information + // may be incomplete for a newly-created WorkSpace. + Workspaces []*Workspace `type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspacesOutput) SetNextToken(v string) *DescribeWorkspacesOutput { + s.NextToken = &v + return s +} + +// SetWorkspaces sets the Workspaces field's value. +func (s *DescribeWorkspacesOutput) SetWorkspaces(v []*Workspace) *DescribeWorkspacesOutput { + s.Workspaces = v + return s +} + +// Contains information about a WorkSpace that could not be created. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedCreateWorkspaceRequest +type FailedCreateWorkspaceRequest struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The textual error message. 
+ ErrorMessage *string `type:"string"` + + // A FailedCreateWorkspaceRequest$WorkspaceRequest object that contains the + // information about the WorkSpace that could not be created. + WorkspaceRequest *WorkspaceRequest `type:"structure"` +} + +// String returns the string representation +func (s FailedCreateWorkspaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedCreateWorkspaceRequest) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *FailedCreateWorkspaceRequest) SetErrorCode(v string) *FailedCreateWorkspaceRequest { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *FailedCreateWorkspaceRequest) SetErrorMessage(v string) *FailedCreateWorkspaceRequest { + s.ErrorMessage = &v + return s +} + +// SetWorkspaceRequest sets the WorkspaceRequest field's value. +func (s *FailedCreateWorkspaceRequest) SetWorkspaceRequest(v *WorkspaceRequest) *FailedCreateWorkspaceRequest { + s.WorkspaceRequest = v + return s +} + +// Contains information about a WorkSpace that could not be rebooted (RebootWorkspaces), +// rebuilt (RebuildWorkspaces), terminated (TerminateWorkspaces), started (StartWorkspaces), +// or stopped (StopWorkspaces). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/FailedWorkspaceChangeRequest +type FailedWorkspaceChangeRequest struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The textual error message. + ErrorMessage *string `type:"string"` + + // The identifier of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s FailedWorkspaceChangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailedWorkspaceChangeRequest) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *FailedWorkspaceChangeRequest) SetErrorCode(v string) *FailedWorkspaceChangeRequest { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *FailedWorkspaceChangeRequest) SetErrorMessage(v string) *FailedWorkspaceChangeRequest { + s.ErrorMessage = &v + return s +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *FailedWorkspaceChangeRequest) SetWorkspaceId(v string) *FailedWorkspaceChangeRequest { + s.WorkspaceId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspacePropertiesRequest +type ModifyWorkspacePropertiesInput struct { + _ struct{} `type:"structure"` + + // The ID of the WorkSpace. + // + // WorkspaceId is a required field + WorkspaceId *string `type:"string" required:"true"` + + // The WorkSpace properties of the request. + // + // WorkspaceProperties is a required field + WorkspaceProperties *WorkspaceProperties `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ModifyWorkspacePropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyWorkspacePropertiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
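+//
+// NOTE (editor's addition, not generated SDK code): a hedged sketch switching a
+// WorkSpace to AutoStop; the WorkspaceProperties type and RunningMode enum are
+// defined elsewhere in this package, and the ID is a placeholder:
+//
+//    _, err := client.ModifyWorkspaceProperties(&workspaces.ModifyWorkspacePropertiesInput{
+//        WorkspaceId: aws.String("ws-0123456789"),
+//        WorkspaceProperties: &workspaces.WorkspaceProperties{
+//            RunningMode: aws.String(workspaces.RunningModeAutoStop),
+//        },
+//    })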
+func (s *ModifyWorkspacePropertiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyWorkspacePropertiesInput"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + if s.WorkspaceProperties == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceProperties")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *ModifyWorkspacePropertiesInput) SetWorkspaceId(v string) *ModifyWorkspacePropertiesInput { + s.WorkspaceId = &v + return s +} + +// SetWorkspaceProperties sets the WorkspaceProperties field's value. +func (s *ModifyWorkspacePropertiesInput) SetWorkspaceProperties(v *WorkspaceProperties) *ModifyWorkspacePropertiesInput { + s.WorkspaceProperties = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspacePropertiesResult +type ModifyWorkspacePropertiesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyWorkspacePropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyWorkspacePropertiesOutput) GoString() string { + return s.String() +} + +// Contains information used with the RebootWorkspaces operation to reboot a +// WorkSpace. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootRequest +type RebootRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to reboot. + // + // WorkspaceId is a required field + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootRequest"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *RebootRequest) SetWorkspaceId(v string) *RebootRequest { + s.WorkspaceId = &v + return s +} + +// Contains the inputs for the RebootWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesRequest +type RebootWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to reboot. + // + // RebootWorkspaceRequests is a required field + RebootWorkspaceRequests []*RebootRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebootWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
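+//
+// NOTE (editor's addition, not generated SDK code): a sketch of a batch reboot
+// with placeholder IDs; per-WorkSpace failures surface in the output rather
+// than as an error:
+//
+//    out, err := client.RebootWorkspaces(&workspaces.RebootWorkspacesInput{
+//        RebootWorkspaceRequests: []*workspaces.RebootRequest{
+//            {WorkspaceId: aws.String("ws-0123456789")},
+//            {WorkspaceId: aws.String("ws-abcdef0123")},
+//        },
+//    })
+//    if err == nil {
+//        for _, f := range out.FailedRequests {
+//            fmt.Println(aws.StringValue(f.WorkspaceId), aws.StringValue(f.ErrorCode))
+//        }
+//    }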
+func (s *RebootWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootWorkspacesInput"} + if s.RebootWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("RebootWorkspaceRequests")) + } + if s.RebootWorkspaceRequests != nil && len(s.RebootWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RebootWorkspaceRequests", 1)) + } + if s.RebootWorkspaceRequests != nil { + for i, v := range s.RebootWorkspaceRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RebootWorkspaceRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRebootWorkspaceRequests sets the RebootWorkspaceRequests field's value. +func (s *RebootWorkspacesInput) SetRebootWorkspaceRequests(v []*RebootRequest) *RebootWorkspacesInput { + s.RebootWorkspaceRequests = v + return s +} + +// Contains the results of the RebootWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebootWorkspacesResult +type RebootWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures representing any WorkSpaces that could not be rebooted. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s RebootWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootWorkspacesOutput) GoString() string { + return s.String() +} + +// SetFailedRequests sets the FailedRequests field's value. +func (s *RebootWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *RebootWorkspacesOutput { + s.FailedRequests = v + return s +} + +// Contains information used with the RebuildWorkspaces operation to rebuild +// a WorkSpace. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildRequest +type RebuildRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to rebuild. + // + // WorkspaceId is a required field + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebuildRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebuildRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebuildRequest"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *RebuildRequest) SetWorkspaceId(v string) *RebuildRequest { + s.WorkspaceId = &v + return s +} + +// Contains the inputs for the RebuildWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesRequest +type RebuildWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to rebuild. 
+ // + // RebuildWorkspaceRequests is a required field + RebuildWorkspaceRequests []*RebuildRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s RebuildWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebuildWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebuildWorkspacesInput"} + if s.RebuildWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("RebuildWorkspaceRequests")) + } + if s.RebuildWorkspaceRequests != nil && len(s.RebuildWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RebuildWorkspaceRequests", 1)) + } + if s.RebuildWorkspaceRequests != nil { + for i, v := range s.RebuildWorkspaceRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RebuildWorkspaceRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRebuildWorkspaceRequests sets the RebuildWorkspaceRequests field's value. +func (s *RebuildWorkspacesInput) SetRebuildWorkspaceRequests(v []*RebuildRequest) *RebuildWorkspacesInput { + s.RebuildWorkspaceRequests = v + return s +} + +// Contains the results of the RebuildWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RebuildWorkspacesResult +type RebuildWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures representing any WorkSpaces that could not be rebuilt. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s RebuildWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebuildWorkspacesOutput) GoString() string { + return s.String() +} + +// SetFailedRequests sets the FailedRequests field's value. +func (s *RebuildWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *RebuildWorkspacesOutput { + s.FailedRequests = v + return s +} + +// Describes the start request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartRequest +type StartRequest struct { + _ struct{} `type:"structure"` + + // The ID of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s StartRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartRequest) GoString() string { + return s.String() +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *StartRequest) SetWorkspaceId(v string) *StartRequest { + s.WorkspaceId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspacesRequest +type StartWorkspacesInput struct { + _ struct{} `type:"structure"` + + // The requests. 
+ // + // StartWorkspaceRequests is a required field + StartWorkspaceRequests []*StartRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s StartWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartWorkspacesInput"} + if s.StartWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("StartWorkspaceRequests")) + } + if s.StartWorkspaceRequests != nil && len(s.StartWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StartWorkspaceRequests", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStartWorkspaceRequests sets the StartWorkspaceRequests field's value. +func (s *StartWorkspacesInput) SetStartWorkspaceRequests(v []*StartRequest) *StartWorkspacesInput { + s.StartWorkspaceRequests = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StartWorkspacesResult +type StartWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // The failed requests. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s StartWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartWorkspacesOutput) GoString() string { + return s.String() +} + +// SetFailedRequests sets the FailedRequests field's value. +func (s *StartWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *StartWorkspacesOutput { + s.FailedRequests = v + return s +} + +// Describes the stop request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopRequest +type StopRequest struct { + _ struct{} `type:"structure"` + + // The ID of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s StopRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopRequest) GoString() string { + return s.String() +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *StopRequest) SetWorkspaceId(v string) *StopRequest { + s.WorkspaceId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspacesRequest +type StopWorkspacesInput struct { + _ struct{} `type:"structure"` + + // The requests. + // + // StopWorkspaceRequests is a required field + StopWorkspaceRequests []*StopRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s StopWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StopWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopWorkspacesInput"} + if s.StopWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("StopWorkspaceRequests")) + } + if s.StopWorkspaceRequests != nil && len(s.StopWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StopWorkspaceRequests", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStopWorkspaceRequests sets the StopWorkspaceRequests field's value. +func (s *StopWorkspacesInput) SetStopWorkspaceRequests(v []*StopRequest) *StopWorkspacesInput { + s.StopWorkspaceRequests = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/StopWorkspacesResult +type StopWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // The failed requests. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s StopWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopWorkspacesOutput) GoString() string { + return s.String() +} + +// SetFailedRequests sets the FailedRequests field's value. +func (s *StopWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *StopWorkspacesOutput { + s.FailedRequests = v + return s +} + +// Describes the tag of the WorkSpace. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Contains information used with the TerminateWorkspaces operation to terminate +// a WorkSpace. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateRequest +type TerminateRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the WorkSpace to terminate. + // + // WorkspaceId is a required field + WorkspaceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TerminateRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
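+//
+// NOTE (editor's addition, not generated SDK code): termination is permanent
+// and destroys user data; a sketch with a placeholder ID:
+//
+//    _, err := client.TerminateWorkspaces(&workspaces.TerminateWorkspacesInput{
+//        TerminateWorkspaceRequests: []*workspaces.TerminateRequest{
+//            {WorkspaceId: aws.String("ws-0123456789")},
+//        },
+//    })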
+func (s *TerminateRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateRequest"} + if s.WorkspaceId == nil { + invalidParams.Add(request.NewErrParamRequired("WorkspaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *TerminateRequest) SetWorkspaceId(v string) *TerminateRequest { + s.WorkspaceId = &v + return s +} + +// Contains the inputs for the TerminateWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesRequest +type TerminateWorkspacesInput struct { + _ struct{} `type:"structure"` + + // An array of structures that specify the WorkSpaces to terminate. + // + // TerminateWorkspaceRequests is a required field + TerminateWorkspaceRequests []*TerminateRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TerminateWorkspacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkspacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateWorkspacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateWorkspacesInput"} + if s.TerminateWorkspaceRequests == nil { + invalidParams.Add(request.NewErrParamRequired("TerminateWorkspaceRequests")) + } + if s.TerminateWorkspaceRequests != nil && len(s.TerminateWorkspaceRequests) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TerminateWorkspaceRequests", 1)) + } + if s.TerminateWorkspaceRequests != nil { + for i, v := range s.TerminateWorkspaceRequests { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TerminateWorkspaceRequests", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTerminateWorkspaceRequests sets the TerminateWorkspaceRequests field's value. +func (s *TerminateWorkspacesInput) SetTerminateWorkspaceRequests(v []*TerminateRequest) *TerminateWorkspacesInput { + s.TerminateWorkspaceRequests = v + return s +} + +// Contains the results of the TerminateWorkspaces operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/TerminateWorkspacesResult +type TerminateWorkspacesOutput struct { + _ struct{} `type:"structure"` + + // An array of structures representing any WorkSpaces that could not be terminated. + FailedRequests []*FailedWorkspaceChangeRequest `type:"list"` +} + +// String returns the string representation +func (s TerminateWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TerminateWorkspacesOutput) GoString() string { + return s.String() +} + +// SetFailedRequests sets the FailedRequests field's value. +func (s *TerminateWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChangeRequest) *TerminateWorkspacesOutput { + s.FailedRequests = v + return s +} + +// Contains information about the user storage for a WorkSpace bundle. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UserStorage +type UserStorage struct { + _ struct{} `type:"structure"` + + // The amount of user storage for the bundle. 
+ Capacity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UserStorage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserStorage) GoString() string { + return s.String() +} + +// SetCapacity sets the Capacity field's value. +func (s *UserStorage) SetCapacity(v string) *UserStorage { + s.Capacity = &v + return s +} + +// Contains information about a WorkSpace. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/Workspace +type Workspace struct { + _ struct{} `type:"structure"` + + // The identifier of the bundle that the WorkSpace was created from. + BundleId *string `type:"string"` + + // The name of the WorkSpace as seen by the operating system. + ComputerName *string `type:"string"` + + // The identifier of the AWS Directory Service directory that the WorkSpace + // belongs to. + DirectoryId *string `type:"string"` + + // If the WorkSpace could not be created, this contains the error code. + ErrorCode *string `type:"string"` + + // If the WorkSpace could not be created, this contains a textual error message + // that describes the failure. + ErrorMessage *string `type:"string"` + + // The IP address of the WorkSpace. + IpAddress *string `type:"string"` + + // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + RootVolumeEncryptionEnabled *bool `type:"boolean"` + + // The operational state of the WorkSpace. + State *string `type:"string" enum:"WorkspaceState"` + + // The identifier of the subnet that the WorkSpace is in. + SubnetId *string `type:"string"` + + // The user that the WorkSpace is assigned to. + UserName *string `min:"1" type:"string"` + + // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + UserVolumeEncryptionEnabled *bool `type:"boolean"` + + // The KMS key used to encrypt data stored on your WorkSpace. + VolumeEncryptionKey *string `type:"string"` + + // The identifier of the WorkSpace. + WorkspaceId *string `type:"string"` + + // Describes the properties of a WorkSpace. + WorkspaceProperties *WorkspaceProperties `type:"structure"` +} + +// String returns the string representation +func (s Workspace) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Workspace) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *Workspace) SetBundleId(v string) *Workspace { + s.BundleId = &v + return s +} + +// SetComputerName sets the ComputerName field's value. +func (s *Workspace) SetComputerName(v string) *Workspace { + s.ComputerName = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *Workspace) SetDirectoryId(v string) *Workspace { + s.DirectoryId = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *Workspace) SetErrorCode(v string) *Workspace { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *Workspace) SetErrorMessage(v string) *Workspace { + s.ErrorMessage = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *Workspace) SetIpAddress(v string) *Workspace { + s.IpAddress = &v + return s +} + +// SetRootVolumeEncryptionEnabled sets the RootVolumeEncryptionEnabled field's value. 
+func (s *Workspace) SetRootVolumeEncryptionEnabled(v bool) *Workspace { + s.RootVolumeEncryptionEnabled = &v + return s +} + +// SetState sets the State field's value. +func (s *Workspace) SetState(v string) *Workspace { + s.State = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *Workspace) SetSubnetId(v string) *Workspace { + s.SubnetId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *Workspace) SetUserName(v string) *Workspace { + s.UserName = &v + return s +} + +// SetUserVolumeEncryptionEnabled sets the UserVolumeEncryptionEnabled field's value. +func (s *Workspace) SetUserVolumeEncryptionEnabled(v bool) *Workspace { + s.UserVolumeEncryptionEnabled = &v + return s +} + +// SetVolumeEncryptionKey sets the VolumeEncryptionKey field's value. +func (s *Workspace) SetVolumeEncryptionKey(v string) *Workspace { + s.VolumeEncryptionKey = &v + return s +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *Workspace) SetWorkspaceId(v string) *Workspace { + s.WorkspaceId = &v + return s +} + +// SetWorkspaceProperties sets the WorkspaceProperties field's value. +func (s *Workspace) SetWorkspaceProperties(v *WorkspaceProperties) *Workspace { + s.WorkspaceProperties = v + return s +} + +// Contains information about a WorkSpace bundle. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceBundle +type WorkspaceBundle struct { + _ struct{} `type:"structure"` + + // The bundle identifier. + BundleId *string `type:"string"` + + // A ComputeType object that specifies the compute type for the bundle. + ComputeType *ComputeType `type:"structure"` + + // The bundle description. + Description *string `type:"string"` + + // The name of the bundle. + Name *string `min:"1" type:"string"` + + // The owner of the bundle. This contains the owner's account identifier, or + // AMAZON if the bundle is provided by AWS. + Owner *string `type:"string"` + + // A UserStorage object that specifies the amount of user storage that the bundle + // contains. + UserStorage *UserStorage `type:"structure"` +} + +// String returns the string representation +func (s WorkspaceBundle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceBundle) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *WorkspaceBundle) SetBundleId(v string) *WorkspaceBundle { + s.BundleId = &v + return s +} + +// SetComputeType sets the ComputeType field's value. +func (s *WorkspaceBundle) SetComputeType(v *ComputeType) *WorkspaceBundle { + s.ComputeType = v + return s +} + +// SetDescription sets the Description field's value. +func (s *WorkspaceBundle) SetDescription(v string) *WorkspaceBundle { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *WorkspaceBundle) SetName(v string) *WorkspaceBundle { + s.Name = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *WorkspaceBundle) SetOwner(v string) *WorkspaceBundle { + s.Owner = &v + return s +} + +// SetUserStorage sets the UserStorage field's value. +func (s *WorkspaceBundle) SetUserStorage(v *UserStorage) *WorkspaceBundle { + s.UserStorage = v + return s +} + +// Describes the connection status of a WorkSpace. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceConnectionStatus +type WorkspaceConnectionStatus struct { + _ struct{} `type:"structure"` + + // The connection state of the WorkSpace. Returns UNKNOWN if the WorkSpace is + // in a Stopped state. + ConnectionState *string `type:"string" enum:"ConnectionState"` + + // The timestamp of the connection state check. + ConnectionStateCheckTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The timestamp of the last known user connection. + LastKnownUserConnectionTimestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The ID of the WorkSpace. + WorkspaceId *string `type:"string"` +} + +// String returns the string representation +func (s WorkspaceConnectionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceConnectionStatus) GoString() string { + return s.String() +} + +// SetConnectionState sets the ConnectionState field's value. +func (s *WorkspaceConnectionStatus) SetConnectionState(v string) *WorkspaceConnectionStatus { + s.ConnectionState = &v + return s +} + +// SetConnectionStateCheckTimestamp sets the ConnectionStateCheckTimestamp field's value. +func (s *WorkspaceConnectionStatus) SetConnectionStateCheckTimestamp(v time.Time) *WorkspaceConnectionStatus { + s.ConnectionStateCheckTimestamp = &v + return s +} + +// SetLastKnownUserConnectionTimestamp sets the LastKnownUserConnectionTimestamp field's value. +func (s *WorkspaceConnectionStatus) SetLastKnownUserConnectionTimestamp(v time.Time) *WorkspaceConnectionStatus { + s.LastKnownUserConnectionTimestamp = &v + return s +} + +// SetWorkspaceId sets the WorkspaceId field's value. +func (s *WorkspaceConnectionStatus) SetWorkspaceId(v string) *WorkspaceConnectionStatus { + s.WorkspaceId = &v + return s +} + +// Contains information about an AWS Directory Service directory for use with +// Amazon WorkSpaces. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceDirectory +type WorkspaceDirectory struct { + _ struct{} `type:"structure"` + + // The directory alias. + Alias *string `type:"string"` + + // The user name for the service account. + CustomerUserName *string `min:"1" type:"string"` + + // The directory identifier. + DirectoryId *string `type:"string"` + + // The name of the directory. + DirectoryName *string `type:"string"` + + // The directory type. + DirectoryType *string `type:"string" enum:"WorkspaceDirectoryType"` + + // An array of strings that contains the IP addresses of the DNS servers for + // the directory. + DnsIpAddresses []*string `type:"list"` + + // The identifier of the IAM role. This is the role that allows Amazon WorkSpaces + // to make calls to other services, such as Amazon EC2, on your behalf. + IamRoleId *string `type:"string"` + + // The registration code for the directory. This is the code that users enter + // in their Amazon WorkSpaces client application to connect to the directory. + RegistrationCode *string `min:"1" type:"string"` + + // The state of the directory's registration with Amazon WorkSpaces. + State *string `type:"string" enum:"WorkspaceDirectoryState"` + + // An array of strings that contains the identifiers of the subnets used with + // the directory. + SubnetIds []*string `type:"list"` + + // A structure that specifies the default creation properties for all WorkSpaces + // in the directory.
+ WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` + + // The identifier of the security group that is assigned to new WorkSpaces. + WorkspaceSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s WorkspaceDirectory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceDirectory) GoString() string { + return s.String() +} + +// SetAlias sets the Alias field's value. +func (s *WorkspaceDirectory) SetAlias(v string) *WorkspaceDirectory { + s.Alias = &v + return s +} + +// SetCustomerUserName sets the CustomerUserName field's value. +func (s *WorkspaceDirectory) SetCustomerUserName(v string) *WorkspaceDirectory { + s.CustomerUserName = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *WorkspaceDirectory) SetDirectoryId(v string) *WorkspaceDirectory { + s.DirectoryId = &v + return s +} + +// SetDirectoryName sets the DirectoryName field's value. +func (s *WorkspaceDirectory) SetDirectoryName(v string) *WorkspaceDirectory { + s.DirectoryName = &v + return s +} + +// SetDirectoryType sets the DirectoryType field's value. +func (s *WorkspaceDirectory) SetDirectoryType(v string) *WorkspaceDirectory { + s.DirectoryType = &v + return s +} + +// SetDnsIpAddresses sets the DnsIpAddresses field's value. +func (s *WorkspaceDirectory) SetDnsIpAddresses(v []*string) *WorkspaceDirectory { + s.DnsIpAddresses = v + return s +} + +// SetIamRoleId sets the IamRoleId field's value. +func (s *WorkspaceDirectory) SetIamRoleId(v string) *WorkspaceDirectory { + s.IamRoleId = &v + return s +} + +// SetRegistrationCode sets the RegistrationCode field's value. +func (s *WorkspaceDirectory) SetRegistrationCode(v string) *WorkspaceDirectory { + s.RegistrationCode = &v + return s +} + +// SetState sets the State field's value. +func (s *WorkspaceDirectory) SetState(v string) *WorkspaceDirectory { + s.State = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *WorkspaceDirectory) SetSubnetIds(v []*string) *WorkspaceDirectory { + s.SubnetIds = v + return s +} + +// SetWorkspaceCreationProperties sets the WorkspaceCreationProperties field's value. +func (s *WorkspaceDirectory) SetWorkspaceCreationProperties(v *DefaultWorkspaceCreationProperties) *WorkspaceDirectory { + s.WorkspaceCreationProperties = v + return s +} + +// SetWorkspaceSecurityGroupId sets the WorkspaceSecurityGroupId field's value. +func (s *WorkspaceDirectory) SetWorkspaceSecurityGroupId(v string) *WorkspaceDirectory { + s.WorkspaceSecurityGroupId = &v + return s +} + +// Describes the properties of a WorkSpace. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceProperties +type WorkspaceProperties struct { + _ struct{} `type:"structure"` + + // The running mode of the WorkSpace. AlwaysOn WorkSpaces are billed monthly. + // AutoStop WorkSpaces are billed by the hour and stopped when no longer being + // used in order to save on costs. + RunningMode *string `type:"string" enum:"RunningMode"` + + // The time after a user logs off when WorkSpaces are automatically stopped. + // Configured in 60 minute intervals. 
+ RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s WorkspaceProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceProperties) GoString() string { + return s.String() +} + +// SetRunningMode sets the RunningMode field's value. +func (s *WorkspaceProperties) SetRunningMode(v string) *WorkspaceProperties { + s.RunningMode = &v + return s +} + +// SetRunningModeAutoStopTimeoutInMinutes sets the RunningModeAutoStopTimeoutInMinutes field's value. +func (s *WorkspaceProperties) SetRunningModeAutoStopTimeoutInMinutes(v int64) *WorkspaceProperties { + s.RunningModeAutoStopTimeoutInMinutes = &v + return s +} + +// Contains information about a WorkSpace creation request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/WorkspaceRequest +type WorkspaceRequest struct { + _ struct{} `type:"structure"` + + // The identifier of the bundle to create the WorkSpace from. You can use the + // DescribeWorkspaceBundles operation to obtain a list of the bundles that are + // available. + // + // BundleId is a required field + BundleId *string `type:"string" required:"true"` + + // The identifier of the AWS Directory Service directory to create the WorkSpace + // in. You can use the DescribeWorkspaceDirectories operation to obtain a list + // of the directories that are available. + // + // DirectoryId is a required field + DirectoryId *string `type:"string" required:"true"` + + // Specifies whether the data stored on the root volume, or C: drive, is encrypted. + RootVolumeEncryptionEnabled *bool `type:"boolean"` + + // The tags of the WorkSpace request. + Tags []*Tag `type:"list"` + + // The username that the WorkSpace is assigned to. This username must exist + // in the AWS Directory Service directory specified by the DirectoryId member. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` + + // Specifies whether the data stored on the user volume, or D: drive, is encrypted. + UserVolumeEncryptionEnabled *bool `type:"boolean"` + + // The KMS key used to encrypt data stored on your WorkSpace. + VolumeEncryptionKey *string `type:"string"` + + // Describes the properties of a WorkSpace. + WorkspaceProperties *WorkspaceProperties `type:"structure"` +} + +// String returns the string representation +func (s WorkspaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkspaceRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WorkspaceRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkspaceRequest"} + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBundleId sets the BundleId field's value. +func (s *WorkspaceRequest) SetBundleId(v string) *WorkspaceRequest { + s.BundleId = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *WorkspaceRequest) SetDirectoryId(v string) *WorkspaceRequest { + s.DirectoryId = &v + return s +} + +// SetRootVolumeEncryptionEnabled sets the RootVolumeEncryptionEnabled field's value. +func (s *WorkspaceRequest) SetRootVolumeEncryptionEnabled(v bool) *WorkspaceRequest { + s.RootVolumeEncryptionEnabled = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *WorkspaceRequest) SetTags(v []*Tag) *WorkspaceRequest { + s.Tags = v + return s +} + +// SetUserName sets the UserName field's value. +func (s *WorkspaceRequest) SetUserName(v string) *WorkspaceRequest { + s.UserName = &v + return s +} + +// SetUserVolumeEncryptionEnabled sets the UserVolumeEncryptionEnabled field's value. +func (s *WorkspaceRequest) SetUserVolumeEncryptionEnabled(v bool) *WorkspaceRequest { + s.UserVolumeEncryptionEnabled = &v + return s +} + +// SetVolumeEncryptionKey sets the VolumeEncryptionKey field's value. +func (s *WorkspaceRequest) SetVolumeEncryptionKey(v string) *WorkspaceRequest { + s.VolumeEncryptionKey = &v + return s +} + +// SetWorkspaceProperties sets the WorkspaceProperties field's value. 
+func (s *WorkspaceRequest) SetWorkspaceProperties(v *WorkspaceProperties) *WorkspaceRequest { + s.WorkspaceProperties = v + return s +} + +const ( + // ComputeValue is a Compute enum value + ComputeValue = "VALUE" + + // ComputeStandard is a Compute enum value + ComputeStandard = "STANDARD" + + // ComputePerformance is a Compute enum value + ComputePerformance = "PERFORMANCE" +) + +const ( + // ConnectionStateConnected is a ConnectionState enum value + ConnectionStateConnected = "CONNECTED" + + // ConnectionStateDisconnected is a ConnectionState enum value + ConnectionStateDisconnected = "DISCONNECTED" + + // ConnectionStateUnknown is a ConnectionState enum value + ConnectionStateUnknown = "UNKNOWN" +) + +const ( + // RunningModeAutoStop is a RunningMode enum value + RunningModeAutoStop = "AUTO_STOP" + + // RunningModeAlwaysOn is a RunningMode enum value + RunningModeAlwaysOn = "ALWAYS_ON" +) + +const ( + // WorkspaceDirectoryStateRegistering is a WorkspaceDirectoryState enum value + WorkspaceDirectoryStateRegistering = "REGISTERING" + + // WorkspaceDirectoryStateRegistered is a WorkspaceDirectoryState enum value + WorkspaceDirectoryStateRegistered = "REGISTERED" + + // WorkspaceDirectoryStateDeregistering is a WorkspaceDirectoryState enum value + WorkspaceDirectoryStateDeregistering = "DEREGISTERING" + + // WorkspaceDirectoryStateDeregistered is a WorkspaceDirectoryState enum value + WorkspaceDirectoryStateDeregistered = "DEREGISTERED" + + // WorkspaceDirectoryStateError is a WorkspaceDirectoryState enum value + WorkspaceDirectoryStateError = "ERROR" +) + +const ( + // WorkspaceDirectoryTypeSimpleAd is a WorkspaceDirectoryType enum value + WorkspaceDirectoryTypeSimpleAd = "SIMPLE_AD" + + // WorkspaceDirectoryTypeAdConnector is a WorkspaceDirectoryType enum value + WorkspaceDirectoryTypeAdConnector = "AD_CONNECTOR" +) + +const ( + // WorkspaceStatePending is a WorkspaceState enum value + WorkspaceStatePending = "PENDING" + + // WorkspaceStateAvailable is a WorkspaceState enum value + WorkspaceStateAvailable = "AVAILABLE" + + // WorkspaceStateImpaired is a WorkspaceState enum value + WorkspaceStateImpaired = "IMPAIRED" + + // WorkspaceStateUnhealthy is a WorkspaceState enum value + WorkspaceStateUnhealthy = "UNHEALTHY" + + // WorkspaceStateRebooting is a WorkspaceState enum value + WorkspaceStateRebooting = "REBOOTING" + + // WorkspaceStateStarting is a WorkspaceState enum value + WorkspaceStateStarting = "STARTING" + + // WorkspaceStateRebuilding is a WorkspaceState enum value + WorkspaceStateRebuilding = "REBUILDING" + + // WorkspaceStateMaintenance is a WorkspaceState enum value + WorkspaceStateMaintenance = "MAINTENANCE" + + // WorkspaceStateTerminating is a WorkspaceState enum value + WorkspaceStateTerminating = "TERMINATING" + + // WorkspaceStateTerminated is a WorkspaceState enum value + WorkspaceStateTerminated = "TERMINATED" + + // WorkspaceStateSuspended is a WorkspaceState enum value + WorkspaceStateSuspended = "SUSPENDED" + + // WorkspaceStateStopping is a WorkspaceState enum value + WorkspaceStateStopping = "STOPPING" + + // WorkspaceStateStopped is a WorkspaceState enum value + WorkspaceStateStopped = "STOPPED" + + // WorkspaceStateError is a WorkspaceState enum value + WorkspaceStateError = "ERROR" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go new file mode 100644 index 00000000000..8320f961c3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/doc.go @@ -0,0 +1,29 
@@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package workspaces provides the client and types for making API +// requests to Amazon WorkSpaces. +// +// This reference provides detailed information about the Amazon WorkSpaces +// operations. +// +// See https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08 for more information on this service. +// +// See workspaces package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/workspaces/ +// +// Using the Client +// +// To use Amazon WorkSpaces with the SDK, use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon WorkSpaces client WorkSpaces for more +// information on creating a client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/workspaces/#New +package workspaces diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go new file mode 100644 index 00000000000..78fcbedb2b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/errors.go @@ -0,0 +1,56 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // The user is not authorized to access a resource. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeInvalidParameterValuesException for service response error code + // "InvalidParameterValuesException". + // + // One or more parameter values are not valid. + ErrCodeInvalidParameterValuesException = "InvalidParameterValuesException" + + // ErrCodeInvalidResourceStateException for service response error code + // "InvalidResourceStateException". + // + // The state of the WorkSpace is not valid for this operation. + ErrCodeInvalidResourceStateException = "InvalidResourceStateException" + + // ErrCodeOperationInProgressException for service response error code + // "OperationInProgressException". + // + // The properties of this WorkSpace are currently being modified. Try again + // in a moment. + ErrCodeOperationInProgressException = "OperationInProgressException" + + // ErrCodeResourceLimitExceededException for service response error code + // "ResourceLimitExceededException". + // + // Your resource limits have been exceeded. + ErrCodeResourceLimitExceededException = "ResourceLimitExceededException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource could not be found. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeResourceUnavailableException for service response error code + // "ResourceUnavailableException". + // + // The specified resource is not available. + ErrCodeResourceUnavailableException = "ResourceUnavailableException" + + // ErrCodeUnsupportedWorkspaceConfigurationException for service response error code + // "UnsupportedWorkspaceConfigurationException". + // + // The configuration of this WorkSpace is not supported for this operation.
+ // For more information, see the Amazon WorkSpaces Administration Guide (http://docs.aws.amazon.com/workspaces/latest/adminguide/). + ErrCodeUnsupportedWorkspaceConfigurationException = "UnsupportedWorkspaceConfigurationException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go new file mode 100644 index 00000000000..45241703115 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go @@ -0,0 +1,95 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// WorkSpaces provides the API operation methods for making requests to +// Amazon WorkSpaces. See this package's package overview docs +// for details on the service. +// +// WorkSpaces methods are safe to use concurrently. It is not safe to +// modify any of the struct's properties though. +type WorkSpaces struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "workspaces" // Service endpoint prefix API calls are made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the WorkSpaces client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a WorkSpaces client from just a session. +// svc := workspaces.New(mySession) +// +// // Create a WorkSpaces client with additional configuration +// svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkSpaces { + svc := &WorkSpaces{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-04-08", + JSONVersion: "1.1", + TargetPrefix: "WorkspacesService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a WorkSpaces operation and runs any +// custom request initialization.
+func (c *WorkSpaces) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 00000000000..b39ddfa5cbd --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go new file mode 100644 index 00000000000..009ebda70ca --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -0,0 +1,824 @@ +// Code generated by protoc-gen-gogo. +// source: auth.proto +// DO NOT EDIT! + +/* + Package authpb is a generated protocol buffer package. + + It is generated from these files: + auth.proto + + It has these top-level messages: + User + Permission + Role +*/ +package authpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Permission_Type int32 + +const ( + READ Permission_Type = 0 + WRITE Permission_Type = 1 + READWRITE Permission_Type = 2 +) + +var Permission_Type_name = map[int32]string{ + 0: "READ", + 1: "WRITE", + 2: "READWRITE", +} +var Permission_Type_value = map[string]int32{ + "READ": 0, + "WRITE": 1, + "READWRITE": 2, +} + +func (x Permission_Type) String() string { + return proto.EnumName(Permission_Type_name, int32(x)) +} +func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} } + +// User is a single entry in the bucket authUsers +type User struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + +// Permission is a single entity +type Permission struct { + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } + +// Role is a single entry in the bucket authRoles +type Role struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` +} + +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } + +func init() { + proto.RegisterType((*User)(nil), "authpb.User") + proto.RegisterType((*Permission)(nil), "authpb.Permission") + proto.RegisterType((*Role)(nil), "authpb.Role") + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) +} +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *Permission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *Permission) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PermType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.KeyPermission) > 0 { + for _, msg := range m.KeyPermission { + dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *User) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + return n +} + +func (m *Permission) Size() (n int) { + var l int + _ = l + if m.PermType != 0 { + n += 1 + sovAuth(uint64(m.PermType)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.KeyPermission) > 0 { + for _, e := range m.KeyPermission { + l = e.Size() + n += 1 + l + sovAuth(uint64(l)) + } + } + return n +} + +func sovAuth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) + if m.Password == nil { + m.Password = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) + } + m.PermType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PermType |= (Permission_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPermission = append(m.KeyPermission, &Permission{}) + if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAuth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAuth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, + 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, + 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, + 0x64, 0x43, 
0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, + 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, + 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, + 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, + 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, + 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, + 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, + 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, + 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, + 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, + 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, + 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, + 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, + 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto new file mode 100644 index 00000000000..001d3343548 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; +package authpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +// User is a single entry in the bucket authUsers +message User { + bytes name = 1; + bytes password = 2; + repeated string roles = 3; +} + +// Permission is a single entity +message Permission { + enum Type { + READ = 0; + WRITE = 1; + READWRITE = 2; + } + Type permType = 1; + + bytes key = 2; + bytes range_end = 3; +} + +// Role is a single entry in the bucket authRoles +message Role { + bytes name = 1; + + repeated Permission keyPermission = 2; +} diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md new file mode 100644 index 00000000000..2be731ede0b --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/README.md @@ -0,0 +1,117 @@ +# etcd/client + +etcd/client is the Go client library for etcd. + +[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) + +etcd uses `cmd/vendor` directory to store external dependencies, which are +to be compiled into etcd release binaries. `client` can be imported without +vendoring. For full compatibility, it is recommended to vendor builds using +etcd's vendored packages, using tools like godep, as in +[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). +For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). 
+
+## Install
+
+```bash
+go get github.com/coreos/etcd/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+	"context"
+
+	"github.com/coreos/etcd/client"
+)
+
+func main() {
+	cfg := client.Config{
+		Endpoints: []string{"http://127.0.0.1:2379"},
+		Transport: client.DefaultTransport,
+		// set timeout per request to fail fast when the target endpoint is unavailable
+		HeaderTimeoutPerRequest: time.Second,
+	}
+	c, err := client.New(cfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	kapi := client.NewKeysAPI(c)
+	// set "/foo" key with "bar" value
+	log.Print("Setting '/foo' key with 'bar' value")
+	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Set is done. Metadata is %q\n", resp)
+	}
+	// get "/foo" key's value
+	log.Print("Getting '/foo' key value")
+	resp, err = kapi.Get(context.Background(), "/foo", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Get is done. Metadata is %q\n", resp)
+		// print value
+		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+	}
+}
+```
+
+## Error Handling
+
+The etcd client may return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or to connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is example code for handling client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+	log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+	if err == context.Canceled {
+		// ctx was canceled by another routine
+	} else if err == context.DeadlineExceeded {
+		// the deadline attached to ctx was exceeded
+	} else if cerr, ok := err.(*client.ClusterError); ok {
+		// process (cerr.Errors)
+	} else {
+		// bad cluster endpoints, which are not etcd servers
+	}
+}
+```
+
+## Caveat
+
+1. etcd/client prefers to keep using the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency on both the client and server side. This preference does not weaken the consistency of the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process.
+
+2. etcd/client does round-robin rotation over the other available endpoints if the preferred endpoint is not functioning properly. For example, if the member that etcd/client connects to is hard-killed, etcd/client will fail on the first attempt with the killed member and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it returns all of the errors it encountered.
+
+3. 
By default, etcd/client cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism does not help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue is not a high priority because a real-life case in which a server is stopped but the connection is kept alive has not been brought to our attention.
+
+4. etcd/client cannot detect whether a member is healthy using watches or non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go
new file mode 100644
index 00000000000..b6ba7e150dc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/auth_role.go
@@ -0,0 +1,236 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+)
+
+type Role struct {
+	Role        string       `json:"role"`
+	Permissions Permissions  `json:"permissions"`
+	Grant       *Permissions `json:"grant,omitempty"`
+	Revoke      *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+	KV rwPermission `json:"kv"`
+}
+
+type rwPermission struct {
+	Read  []string `json:"read"`
+	Write []string `json:"write"`
+}
+
+type PermissionType int
+
+const (
+	ReadPermission PermissionType = iota
+	WritePermission
+	ReadWritePermission
+)
+
+// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
+// interact with etcd's role creation and modification features.
+func NewAuthRoleAPI(c Client) AuthRoleAPI {
+	return &httpAuthRoleAPI{
+		client: c,
+	}
+}
+
+type AuthRoleAPI interface {
+	// AddRole adds a role.
+	AddRole(ctx context.Context, role string) error
+
+	// RemoveRole removes a role.
+	RemoveRole(ctx context.Context, role string) error
+
+	// GetRole retrieves role details.
+	GetRole(ctx context.Context, role string) (*Role, error)
+
+	// GrantRoleKV grants a role some permission prefixes for the KV store.
+	GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+	// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
+	RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+	// ListRoles lists roles.
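+	//
+	// A minimal usage sketch of this interface (illustrative, not part of
+	// the upstream file; assumes a configured Client c, a context ctx, and
+	// the role/prefix names shown):
+	//
+	//	roles := client.NewAuthRoleAPI(c)
+	//	if err := roles.AddRole(ctx, "app"); err != nil {
+	//		// handle error
+	//	}
+	//	role, err := roles.GrantRoleKV(ctx, "app", []string{"/app/"}, client.ReadWritePermission)
+	//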
+ ListRoles(ctx context.Context) ([]string, error) +} + +type httpAuthRoleAPI struct { + client httpClient +} + +type authRoleAPIAction struct { + verb string + name string + role *Role +} + +type authRoleAPIList struct{} + +func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", l.name) + if l.role == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.role) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { + resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + var roleList struct { + Roles []Role `json:"roles"` + } + if err = json.Unmarshal(body, &roleList); err != nil { + return nil, err + } + ret := make([]string, 0, len(roleList.Roles)) + for _, r := range roleList.Roles { + ret = append(ret, r.Role) + } + return ret, nil +} + +func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { + role := &Role{ + Role: rolename, + } + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "DELETE", + name: rolename, + }) +} + +func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return err + } + if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err := json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { + return r.modRole(ctx, &authRoleAPIAction{ + verb: "GET", + name: rolename, + }) +} + +func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { + var out rwPermission + switch permType { + case ReadPermission: + out.Read = prefixes + case WritePermission: + out.Write = prefixes + case ReadWritePermission: + out.Read = prefixes + out.Write = prefixes + } + return out +} + +func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Grant: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Revoke: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { + resp, body, err 
:= r.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var role Role + if err = json.Unmarshal(body, &role); err != nil { + return nil, err + } + return &role, nil +} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go new file mode 100644 index 00000000000..8e7e2efe833 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_user.go @@ -0,0 +1,319 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" + "path" +) + +var ( + defaultV2AuthPrefix = "/v2/auth" +) + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +// userListEntry is the user representation given by the server for ListUsers +type userListEntry struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +// NewAuthAPI constructs a new AuthAPI that uses HTTP to +// interact with etcd's general auth features. +func NewAuthAPI(c Client) AuthAPI { + return &httpAuthAPI{ + client: c, + } +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. 
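+	//
+	// Sketch of toggling auth from calling code (illustrative; assumes a
+	// configured Client c and a context ctx):
+	//
+	//	auth := client.NewAuthAPI(c)
+	//	if err := auth.Enable(ctx); err != nil {
+	//		// handle error
+	//	}
+	//	// ... later, to turn auth back off:
+	//	_ = auth.Disable(ctx)
+	//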
+ Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. 
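+	//
+	// Sketch of typical user management (illustrative; assumes a configured
+	// Client c, a context ctx, and that the "app" role already exists):
+	//
+	//	users := client.NewAuthUserAPI(c)
+	//	if err := users.AddUser(ctx, "alice", "s3cret"); err != nil {
+	//		// handle error
+	//	}
+	//	u, err := users.GrantUser(ctx, "alice", []string{"app"})
+	//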
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + + var userList struct { + Users []userListEntry `json:"users"` + } + + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + + ret := make([]string, 0, len(userList.Users)) + for _, u := range userList.Users { + ret = append(ret, u.User) + } + return ret, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, 
err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + var userR UserRoles + if urerr := json.Unmarshal(body, &userR); urerr != nil { + return nil, err + } + user.User = userR.User + for _, r := range userR.Roles { + user.Roles = append(user.Roles, r.Role) + } + } + return &user, nil +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 00000000000..76d1f040198 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 00000000000..e6874505666 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,710 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "github.com/coreos/etcd/version" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination. 
+	EndpointSelectionRandom EndpointSelectionMode = iota
+
+	// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+	// requests are sent directly to the cluster leader. This reduces
+	// forwarding roundtrips compared to making requests to etcd followers,
+	// who then forward them to the cluster leader. In the event of a leader
+	// failure, however, clients configured this way cannot prioritize among
+	// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+	// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+	// maintain its knowledge of the current cluster state.
+	//
+	// This mode should be used with Client.AutoSync().
+	EndpointSelectionPrioritizeLeader
+)
+
+type Config struct {
+	// Endpoints defines a set of URLs (schemes, hosts and ports only)
+	// that can be used to communicate with a logical etcd cluster. For
+	// example, a three-node cluster could be provided like so:
+	//
+	//	Endpoints: []string{
+	//		"http://node1.example.com:2379",
+	//		"http://node2.example.com:2379",
+	//		"http://node3.example.com:2379",
+	//	}
+	//
+	// If multiple endpoints are provided, the Client will attempt to
+	// use them all in the event that one or more of them are unusable.
+	//
+	// If Client.Sync is ever called, the Client may cache an alternate
+	// set of endpoints to continue operation.
+	Endpoints []string
+
+	// Transport is used by the Client to drive HTTP requests. If not
+	// provided, DefaultTransport will be used.
+	Transport CancelableTransport
+
+	// CheckRedirect specifies the policy for handling HTTP redirects.
+	// If CheckRedirect is not nil, the Client calls it before
+	// following an HTTP redirect. The sole argument is the number of
+	// requests that have already been made. If CheckRedirect returns
+	// an error, Client.Do will not make any further requests and will
+	// return the error back to the caller.
+	//
+	// If CheckRedirect is nil, the Client uses its default policy,
+	// which is to stop after 10 consecutive requests.
+	CheckRedirect CheckRedirectFunc
+
+	// Username specifies the user credential to add as an authorization header.
+	Username string
+
+	// Password is the password for the specified user to add as an authorization header
+	// to the request.
+	Password string
+
+	// HeaderTimeoutPerRequest specifies the time limit to wait for a response
+	// header in a single request made by the Client. The timeout includes
+	// connection time, any redirects, and header wait time.
+	//
+	// For a non-watch GET request, the server returns the response body immediately.
+	// For a PUT/POST/DELETE request, the server will attempt to commit the request
+	// before responding, which is expected to take `100ms + 2 * RTT`.
+	// For a watch request, the server returns the header immediately to notify the
+	// Client that the watch has started. But if the server is behind some kind of
+	// proxy, the response header may be cached at the proxy, and the Client cannot
+	// rely on this behavior.
+	//
+	// Note that a wait request will ignore this timeout.
+	//
+	// One API call may send multiple requests to different etcd servers until it
+	// succeeds. Use the context of the API call to specify the overall timeout.
+	//
+	// A HeaderTimeoutPerRequest of zero means no timeout.
+	HeaderTimeoutPerRequest time.Duration
+
+	// SelectionMode is an EndpointSelectionMode enum that specifies the
+	// policy for choosing the etcd cluster node to which requests are sent.
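+	//
+	// For example, a sketch of a leader-prioritizing configuration (the
+	// endpoint URL is illustrative), to be paired with AutoSync as noted
+	// above:
+	//
+	//	cfg := client.Config{
+	//		Endpoints:     []string{"http://127.0.0.1:2379"},
+	//		SelectionMode: client.EndpointSelectionPrioritizeLeader,
+	//	}
+	//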
+	SelectionMode EndpointSelectionMode
+}
+
+func (cfg *Config) transport() CancelableTransport {
+	if cfg.Transport == nil {
+		return DefaultTransport
+	}
+	return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+	if cfg.CheckRedirect == nil {
+		return DefaultCheckRedirect
+	}
+	return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+	http.RoundTripper
+	CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+	if via > 10 {
+		return ErrTooManyRedirects
+	}
+	return nil
+}
+
+type Client interface {
+	// Sync updates the internal cache of the etcd cluster's membership.
+	Sync(context.Context) error
+
+	// AutoSync periodically calls Sync() at the given interval.
+	// The recommended sync interval is 10 seconds to 1 minute: this does
+	// not add much overhead on the server and lets the client catch up
+	// with cluster changes in a timely manner.
+	//
+	// Example usage:
+	//
+	//	for {
+	//		err := client.AutoSync(ctx, 10*time.Second)
+	//		if err == context.DeadlineExceeded || err == context.Canceled {
+	//			break
+	//		}
+	//		log.Print(err)
+	//	}
+	AutoSync(context.Context, time.Duration) error
+
+	// Endpoints returns a copy of the current set of API endpoints used
+	// by Client to resolve HTTP requests. If Sync has ever been called,
+	// this may differ from the initial Endpoints provided in the Config.
+	Endpoints() []string
+
+	// SetEndpoints sets the set of API endpoints used by Client to resolve
+	// HTTP requests. If the given endpoints are not valid, an error will be
+	// returned.
+	SetEndpoints(eps []string) error
+
+	// GetVersion retrieves the current etcd server and cluster version.
+	GetVersion(ctx context.Context) (*version.Versions, error)
+
+	httpClient
+}
+
+func New(cfg Config) (Client, error) {
+	c := &httpClusterClient{
+		clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+		rand:          rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+		selectionMode: cfg.SelectionMode,
+	}
+	if cfg.Username != "" {
+		c.credentials = &credentials{
+			username: cfg.Username,
+			password: cfg.Password,
+		}
+	}
+	if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+type httpClient interface {
+	Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+	return func(ep url.URL) httpClient {
+		return &redirectFollowingHTTPClient{
+			checkRedirect: cr,
+			client: &simpleHTTPClient{
+				transport:     tr,
+				endpoint:      ep,
+				headerTimeout: headerTimeout,
+			},
+		}
+	}
+}
+
+type credentials struct {
+	username string
+	password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+	HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+	clientFactory httpClientFactory
+	endpoints     []url.URL
+	pinned        int
+	credentials   *credentials
+	sync.RWMutex
+	rand          *rand.Rand
+	selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+	ceps := make([]url.URL, len(eps))
+	copy(ceps, eps)
+
+	// To perform a lookup on the new endpoint list without using the
+	// current client, we make a copy of it.
+	clientCopy := &httpClusterClient{
+		clientFactory: c.clientFactory,
+		credentials:   c.credentials,
+		rand:          c.rand,
+
+		pinned:    0,
+		endpoints: ceps,
+	}
+
+	mAPI := NewMembersAPI(clientCopy)
+	leader, err := mAPI.Leader(ctx)
+	if err != nil {
+		return "", err
+	}
+	if len(leader.ClientURLs) == 0 {
+		return "", ErrNoLeaderEndpoint
+	}
+
+	return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
+	if len(eps) == 0 {
+		return []url.URL{}, ErrNoEndpoints
+	}
+
+	neps := make([]url.URL, len(eps))
+	for i, ep := range eps {
+		u, err := url.Parse(ep)
+		if err != nil {
+			return []url.URL{}, err
+		}
+		neps[i] = *u
+	}
+	return neps, nil
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+	neps, err := c.parseEndpoints(eps)
+	if err != nil {
+		return err
+	}
+
+	c.Lock()
+	defer c.Unlock()
+
+	c.endpoints = shuffleEndpoints(c.rand, neps)
+	// We're not doing anything for PrioritizeLeader here, because without a
+	// context we can't call getLeaderEndpoint. However, if you're using
+	// PrioritizeLeader, you've already been told to call Sync() regularly;
+	// there we do have a ctx and can determine the leader. PrioritizeLeader
+	// is also quite a loose guarantee, so this is acceptable.
+	c.pinned = 0
+
+	return nil
+}
+
+func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+	action := act
+	c.RLock()
+	leps := len(c.endpoints)
+	eps := make([]url.URL, leps)
+	n := copy(eps, c.endpoints)
+	pinned := c.pinned
+
+	if c.credentials != nil {
+		action = &authedAction{
+			act:         act,
+			credentials: *c.credentials,
+		}
+	}
+	c.RUnlock()
+
+	if leps == 0 {
+		return nil, nil, ErrNoEndpoints
+	}
+
+	if leps != n {
+		return nil, nil, errors.New("unable to pick endpoint: copy failed")
+	}
+
+	var resp *http.Response
+	var body []byte
+	var err error
+	cerr := &ClusterError{}
+	isOneShot := ctx.Value(&oneShotCtxValue) != nil
+
+	for i := pinned; i < leps+pinned; i++ {
+		k := i % leps
+		hc := c.clientFactory(eps[k])
+		resp, body, err = hc.Do(ctx, action)
+		if err != nil {
+			cerr.Errors = append(cerr.Errors, err)
+			if err == ctx.Err() {
+				return nil, nil, ctx.Err()
+			}
+			if err == context.Canceled || err == context.DeadlineExceeded {
+				return nil, nil, err
+			}
+		} else if resp.StatusCode/100 == 5 {
+			switch resp.StatusCode {
+			case http.StatusInternalServerError, http.StatusServiceUnavailable:
+				// TODO: make sure this is a no leader response
+				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
+			default:
+				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
+			}
+			err = cerr.Errors[0]
+		}
+		if err != nil {
+			if !isOneShot {
+				continue
+			}
+			c.Lock()
+			c.pinned = (k + 1) % leps
+			c.Unlock()
+			return nil, nil, err
+		}
+		if k != pinned {
+			c.Lock()
+			c.pinned = k
+			c.Unlock()
+		}
+		return resp, body, nil
+	}
+
+	return nil, nil, cerr
+}
+
+func (c *httpClusterClient) Endpoints() []string {
+	c.RLock()
+	defer c.RUnlock()
+
+	eps := make([]string, len(c.endpoints))
+	for i, ep := range c.endpoints {
+		eps[i] = ep.String()
+	}
+
+	return eps
+}
+
+func (c *httpClusterClient) Sync(ctx context.Context) error {
+	mAPI := NewMembersAPI(c)
+	ms, err := mAPI.List(ctx)
+	if err != nil {
+		return err
+	}
+
+	var eps []string
+	for _, m := range ms {
+
eps = append(eps, m.ClientURLs...) + } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When items in the endpoint list changes, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var 
body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("Location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right []url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 00000000000..34618cdbd9e --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
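+
+// Sketch of inspecting a ClusterError from calling code (illustrative;
+// assumes err was returned by a client API call):
+//
+//	if cerr, ok := err.(*client.ClusterError); ok {
+//		log.Print(cerr.Detail()) // one line per per-endpoint error
+//	}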
+ +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 00000000000..c8bc9fba20e --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 00000000000..442e35fe543 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "github.com/coreos/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
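+//
+// Sketch (illustrative; assumes etcd client SRV records are published
+// under the domain, e.g. _etcd-client._tcp.example.com):
+//
+//	eps, err := client.NewSRVDiscover().Discover("example.com")
+//	if err == nil {
+//		cfg := client.Config{Endpoints: eps}
+//		_ = cfg // use cfg with client.New
+//	}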
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 00000000000..ad4eca4e163 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + "context" + + "github.com/coreos/etcd/client" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Clients are safe for concurrent use by multiple goroutines. + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go new file mode 100644 index 00000000000..237fdbe8ffd --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -0,0 +1,5218 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package client + +import ( + "errors" + "fmt" + "reflect" + "runtime" + time "time" + + codec1978 "github.com/ugorji/go/codec" +) + +const ( + // ----- content types ---- + codecSelferC_UTF87612 = 1 + codecSelferC_RAW7612 = 0 + // ----- value types used ---- + codecSelferValueTypeArray7612 = 10 + codecSelferValueTypeMap7612 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey7612 = 2 + codecSelfer_containerMapValue7612 = 3 + codecSelfer_containerMapEnd7612 = 4 + codecSelfer_containerArrayElem7612 = 6 + codecSelfer_containerArrayEnd7612 = 7 +) + +var ( + codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer7612 struct{} + +func init() { + if codec1978.GenVersion != 8 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 8, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 time.Duration + _ = v0 + } +} + +func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Code)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("errorCode")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Code)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("message")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("cause")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("index")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + 
} else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "errorCode": + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv4 := &x.Code + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cause": + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv8 := &x.Cause + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv10 := &x.Index + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv13 := &x.Code + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv17 := &x.Cause + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv19 := &x.Index + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*uint64)(yyv19)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x)) + } +} + +func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(2) + } else { + r.WriteMapStart(2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("AfterIndex")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "AfterIndex": + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv4 := &x.AfterIndex + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv6 := 
&x.Recursive + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv9 := &x.AfterIndex + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*uint64)(yyv9)) = uint64(r.DecodeUint(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(1) + } else { + r.WriteMapStart(1) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + 
r.ReadMapElemValue() + switch yys3 { + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv4 := &x.TTL + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv7 := &x.TTL + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj6-1, "") + } + r.ReadArrayEnd() +} + +func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(7) + } else { + r.WriteMapStart(7) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + 
yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv8 := &x.PrevExist + yyv8.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv9 := &x.TTL + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv11 := &x.Refresh + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv13 := &x.Dir + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv15 := &x.NoValueOnSuccess + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *SetOptions) codecDecodeSelfFromArray(l int, 
d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv18 := &x.PrevValue + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv20 := &x.PrevIndex + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv22 := &x.PrevExist + yyv22.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv23 := &x.TTL + yym24 := z.DecBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.DecExt(yyv23) { + } else { + *((*int64)(yyv23)) = int64(r.DecodeInt(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv25 := &x.Refresh + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv27 := &x.Dir + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv29 := &x.NoValueOnSuccess + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj17-1, "") + } + r.ReadArrayEnd() +} + +func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sort")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv4 := &x.Recursive + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "Sort": + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv6 := &x.Sort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv8 := &x.Quorum + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv13 := &x.Sort + yym14 := z.DecBinary() + _ = yym14 + if 
false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv15 := &x.Quorum + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj10-1, "") + } + r.ReadArrayEnd() +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv10 := &x.Dir + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv13 := &x.PrevValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv15 := &x.PrevIndex + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv17 := &x.Recursive + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || 
yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("action")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } + var yyn6 bool + if x.Node == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("node")) + r.WriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } + var yyn9 bool + if x.PrevNode == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("prevNode")) + r.WriteMapElemValue() + if yyn9 { + r.EncodeNil() + } else { + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "action": + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv4 := &x.Action + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "node": + if x.Node == nil { + x.Node = new(Node) + } + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + case "prevNode": + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Response) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv9 := &x.Action + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + if x.Node == nil { + x.Node = new(Node) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _ = yyq2 + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Dir != false + yyq2[6] = x.Expiration != nil + yyq2[7] = x.TTL != 0 + if yyr2 || yy2arr2 { + r.WriteArrayStart(8) + } else { + var yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.WriteMapStart(yynn2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("key")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("nodes")) + r.WriteMapElemValue() + if x.Nodes == 
nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("createdIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } + var yyn21 bool + if x.Expiration == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if yyq2[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { + r.EncodeBuiltin(yym23, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym22 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("expiration")) + r.WriteMapElemValue() + if yyn21 { + r.EncodeNil() + } else { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { + r.EncodeBuiltin(yym25, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym24 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("ttl")) + r.WriteMapElemValue() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 
bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "nodes": + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv10 := &x.Nodes + yyv10.CodecDecodeSelf(d) + } + case "createdIndex": + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv11 := &x.CreatedIndex + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + case "modifiedIndex": + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv13 := &x.ModifiedIndex + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + case "expiration": + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { + r.DecodeBuiltin(yym17, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym16 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + case "ttl": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv18 := &x.TTL + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int64)(yyv18)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv21 := &x.Key + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv23 := &x.Dir + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv25 := &x.Value + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj20++ + if 
yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv27 := &x.Nodes + yyv27.CodecDecodeSelf(d) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv28 := &x.CreatedIndex + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv30 := &x.ModifiedIndex + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) + } + } + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym33 := z.DecBinary() + _ = yym33 + if false { + } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { + r.DecodeBuiltin(yym34, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym33 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym33 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj20-1, "") + } + r.ReadArrayEnd() +} + +func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encNodes((Nodes)(x), e) + } + } +} + +func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decNodes((*Nodes)(x), d) + } +} + +func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x 
*httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(5) + } else { + r.WriteMapStart(5) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sorted")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } 
+} + +func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Sorted": + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv10 := &x.Sorted + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv12 := &x.Quorum + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv15 := &x.Prefix + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv17 := &x.Key + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv21 := &x.Sorted + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv23 := &x.Quorum + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + 
} else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj14-1, "") + } + r.ReadArrayEnd() +} + +func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("WaitIndex")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := 
&x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "WaitIndex": + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv8 := &x.WaitIndex + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv10 := &x.Recursive + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv15 := &x.Key + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv17 := &x.WaitIndex + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*uint64)(yyv17)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(10) + } else { + r.WriteMapStart(10) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + 
r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv10 := &x.PrevValue + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv12 := &x.PrevIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv14 := &x.PrevExist + yyv14.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv15 := &x.TTL + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv17 := &x.Refresh + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv21 := &x.NoValueOnSuccess + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() 
+ if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv24 := &x.Prefix + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv26 := &x.Key + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv28 := &x.Value + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv30 := &x.PrevValue + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv32 := &x.PrevIndex + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*uint64)(yyv32)) = uint64(r.DecodeUint(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv34 := &x.PrevExist + yyv34.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv37 := &x.Refresh + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv39 := &x.Dir + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv41 := &x.NoValueOnSuccess + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj23-1, "") + } + r.ReadArrayEnd() +} + +func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + 
r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(6) + } else { + r.WriteMapStart(6) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) 
+ _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv8 := &x.PrevValue + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv10 := &x.PrevIndex + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv12 := &x.Dir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv14 := &x.Recursive + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv17 := &x.Prefix + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv19 := &x.Key + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv21 := &x.PrevValue + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv23 := &x.PrevIndex + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv25 := &x.Dir + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + 
*((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv27 := &x.Recursive + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj16-1, "") + } + r.ReadArrayEnd() +} + +func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x 
*createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv10 := &x.TTL + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv15 := &x.Dir + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv17 := &x.Value + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv19 := &x.TTL + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.WriteArrayStart(len(v)) + for _, yyv1 := range v { + r.WriteArrayElem() + if yyv1 == nil { + r.EncodeNil() + } else { + 
yyv1.CodecEncodeSelf(e) + } + } + r.WriteArrayEnd() +} + +func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Node, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + // var yydn1 bool + for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ { + if yyj1 == 0 && len(yyv1) == 0 { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + } else { + yyrl1 = 8 + } + yyv1 = make([]*Node, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + // yydn1 = r.TryDecodeAsNil() + + // if indefinite, etc, then expand the slice if necessary + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) + yyc1 = true + + } + if yydb1 { + z.DecSwallow() + } else { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = make([]*Node, 0) + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } + +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 00000000000..e8373b94507 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,682 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/pkg/pathutil" + "github.com/ugorji/go/codec" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid JSON. The endpoint is probably not a valid etcd cluster endpoint.") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. +type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd. + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true, + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in a DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. 
+ Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after-which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Refresh set to true means a TTL value can be updated + // without firing a watch or changing the node value. A + // value must not be provided when refreshing a key. + Refresh bool + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. + NoValueOnSuccess bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. 
+ PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc.) will + // also result in a non-nil error. + Next(context.Context) (*Response, error) +} + +type Response struct { + // Action is the name of the operation that occurred. Possible values + // include get, set, delete, update, create, compareAndSwap, + // compareAndDelete and expire. + Action string `json:"action"` + + // Node represents the state of the relevant etcd Node. + Node *Node `json:"node"` + + // PrevNode represents the previous state of the Node. PrevNode is non-nil + // only if the Node existed before the action occurred and the action + // caused a change to the Node. + PrevNode *Node `json:"prevNode"` + + // Index holds the cluster-level index at the time the Response was generated. + // This index is not tied to the Node(s) contained in this Response. + Index uint64 `json:"-"` + + // ClusterID holds the cluster-level ID reported by the server. This + // should be different for different etcd clusters. + ClusterID string `json:"-"` +} + +type Node struct { + // Key represents the unique location of this Node (e.g. "/foo/bar"). + Key string `json:"key"` + + // Dir reports whether the node describes a directory. + Dir bool `json:"dir,omitempty"` + + // Value is the current data stored on this Node. If this Node + // is a directory, Value will be empty. + Value string `json:"value"` + + // Nodes holds the children of this Node, only if this Node is a directory. + // This slice will be arbitrarily deep (children, grandchildren, great- + // grandchildren, etc.) if a recursive Get or Watch request was made. + Nodes Nodes `json:"nodes"` + + // CreatedIndex is the etcd index at-which this Node was created. + CreatedIndex uint64 `json:"createdIndex"` + + // ModifiedIndex is the etcd index at-which this Node was last modified. + ModifiedIndex uint64 `json:"modifiedIndex"` + + // Expiration is the server-side expiration time of the key. + Expiration *time.Time `json:"expiration,omitempty"` + + // TTL is the time to live of the key in seconds. 
+ TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting + +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Refresh = opts.Refresh + act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess + } + + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + 
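// Each pass issues a single long-polling wait request. An empty + // response body (e.g. a server-side watch timeout) is retried, and on + // success nextWait.WaitIndex is advanced past the returned ModifiedIndex + // so the same event is never delivered twice. +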
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not reserve trailing slash. + // We call CanonicalURLPath to further cleanup the path. + if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + if a.Refresh { + form.Add("refresh", "true") + } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} + +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + + return +} + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go new file mode 100644 index 00000000000..aafa3d1b870 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/members.go @@ -0,0 +1,303 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + defaultV2MembersPrefix = "/v2/members" + defaultLeaderSuffix = "/leader" +) + +type Member struct { + // ID is the unique identifier of this Member. + ID string `json:"id"` + + // Name is a human-readable, non-unique identifier of this Member. + Name string `json:"name"` + + // PeerURLs represents the HTTP(S) endpoints this Member uses to + // participate in etcd's consensus protocol. 
+ PeerURLs []string `json:"peerURLs"` + + // ClientURLs represents the HTTP(S) endpoints on which this Member + // serves its client-facing APIs. + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) +} + +func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { + req := &membersAPIActionLeader{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var leader Member + if err := json.Unmarshal(body, &leader); err != nil { + return nil, err + } + + return &leader, nil +} + +type membersAPIActionList struct{} + +func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type membersAPIActionRemove struct { + memberID string +} + +func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, d.memberID) + req, _ := http.NewRequest("DELETE", u.String(), nil) + return req +} + +type membersAPIActionAdd struct { + peerURLs types.URLs +} + +func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +type membersAPIActionUpdate struct { + memberID string + peerURLs types.URLs +} + +func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + u.Path = path.Join(u.Path, a.memberID) + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +func assertStatusCode(got int, want ...int) (err error) { + for _, w := range want { + if w == got { + return nil + } + } + return fmt.Errorf("unexpected status code %d", got) +} + +type membersAPIActionLeader struct{} + +func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, defaultLeaderSuffix) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// v2MembersURL add the necessary path to the provided endpoint +// to route requests to the default v2 members API. +func v2MembersURL(ep url.URL) *url.URL { + ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) + return &ep +} + +type membersError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e membersError) Error() string { + return e.Message +} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 00000000000..15a8babff4d --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} + +// IsRoleNotFound returns true if the error means the role was not found in the v2 API. +func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means the user was not found in the v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md new file mode 100644 index 00000000000..376bfba7614 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. + +## Install + +```bash +go get github.com/coreos/etcd/clientv3 +``` + +## Get started + +Create a client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls, and `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it; +if the client is not closed, the connection leaks goroutines. To specify a client request timeout, +pass a `context.WithTimeout` context to API calls: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +etcd uses the `cmd/vendor` directory to store external dependencies, which are +compiled into etcd release binaries. `client` can be imported without +vendoring. For full compatibility, it is recommended that builds vendor +etcd's vendored packages, using tools like godep, as in +[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). +For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). + +## Error Handling + +The etcd client returns two types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes); a sketch of mapping these to typed errors follows below.
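+ +A gRPC error can also be mapped back to a typed etcd error with `rpctypes.Error` (a minimal sketch, assuming `rpctypes` is imported from `github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes`): + +```go +if etcdErr, ok := rpctypes.Error(err).(rpctypes.EtcdError); ok { + // the server reported a typed etcd error; its gRPC code is available via Code() + log.Printf("etcd error: %v (code %v)", etcdErr, etcdErr.Code()) +} +```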
+ +Here is the example code to handle client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). + +## Namespacing + +The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go new file mode 100644 index 00000000000..8df670f163a --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -0,0 +1,223 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "fmt" + "strings" + + "github.com/coreos/etcd/auth/authpb" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + AuthEnableResponse pb.AuthEnableResponse + AuthDisableResponse pb.AuthDisableResponse + AuthenticateResponse pb.AuthenticateResponse + AuthUserAddResponse pb.AuthUserAddResponse + AuthUserDeleteResponse pb.AuthUserDeleteResponse + AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse + AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse + AuthUserGetResponse pb.AuthUserGetResponse + AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse + AuthRoleAddResponse pb.AuthRoleAddResponse + AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse + AuthRoleGetResponse pb.AuthRoleGetResponse + AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse + AuthRoleDeleteResponse pb.AuthRoleDeleteResponse + AuthUserListResponse pb.AuthUserListResponse + AuthRoleListResponse pb.AuthRoleListResponse + + PermissionType authpb.Permission_Type + Permission authpb.Permission +) + +const ( + PermRead = authpb.READ + PermWrite = authpb.WRITE + PermReadWrite = authpb.READWRITE +) + +type Auth interface { + // AuthEnable enables auth of an etcd cluster. + AuthEnable(ctx context.Context) (*AuthEnableResponse, error) + + // AuthDisable disables auth of an etcd cluster. + AuthDisable(ctx context.Context) (*AuthDisableResponse, error) + + // UserAdd adds a new user to an etcd cluster. 
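+	// A minimal illustrative sketch (not upstream documentation; `cli` is
+	// an assumed connected *Client and `ctx` a context.Context):
+	//
+	//	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
+	//		// handle error
+	//	}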
+	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+
+	// UserDelete deletes a user from an etcd cluster.
+	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes the password of a user.
+	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to a user.
+	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+
+	// UserGet gets detailed information about a user.
+	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+
+	// UserList gets a list of all users.
+	UserList(ctx context.Context) (*AuthUserListResponse, error)
+
+	// UserRevokeRole revokes a role from a user.
+	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role to an etcd cluster.
+	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role.
+	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets detailed information about a role.
+	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
+
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context) (*AuthRoleListResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role.
+	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role.
+	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
+}
+
+type auth struct {
+	remote pb.AuthClient
+}
+
+func NewAuth(c *Client) Auth {
+	return &auth{remote: RetryAuthClient(c)}
+}
+
+func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
+	return (*AuthEnableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
+	return (*AuthDisableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
+	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+	resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
+	return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+	resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
+	return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+	resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
+	return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+	resp,
err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) + return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: []byte(key), + RangeEnd: []byte(rangeEnd), + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}) + return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) + return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) +} + +type authenticator struct { + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient +} + +func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authenticator) close() { + auth.conn.Close() +} + +func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return nil, err + } + + return &authenticator{ + conn: conn, + remote: pb.NewAuthClient(conn), + }, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go new file mode 100644 index 00000000000..2c8c2981d30 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/balancer.go @@ -0,0 +1,438 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "net/url" + "strings" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. +var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") + +type notifyMsg int + +const ( + notifyReset notifyMsg = iota + notifyNext +) + +// simpleBalancer does the bare minimum to expose multiple eps +// to the grpc reconnection code path +type simpleBalancer struct { + // addrs are the client's endpoint addresses for grpc + addrs []grpc.Address + + // eps holds the raw endpoints from the client + eps []string + + // notifyCh notifies grpc of the set of addresses for connecting + notifyCh chan []grpc.Address + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once + + // mu protects all fields below. + mu sync.RWMutex + + // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. + upc chan struct{} + + // downc closes when grpc calls down() on pinAddr + downc chan struct{} + + // stopc is closed to signal updateNotifyLoop should stop. + stopc chan struct{} + + // donec closes when all goroutines are exited + donec chan struct{} + + // updateAddrsC notifies updateNotifyLoop to update addrs. + updateAddrsC chan notifyMsg + + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + hostPort2ep map[string]string + + // pinAddr is the currently pinned address; set to the empty string on + // initialization and shutdown. 
+ pinAddr string + + closed bool +} + +func newSimpleBalancer(eps []string) *simpleBalancer { + notifyCh := make(chan []grpc.Address) + addrs := eps2addrs(eps) + sb := &simpleBalancer{ + addrs: addrs, + eps: eps, + notifyCh: notifyCh, + readyc: make(chan struct{}), + upc: make(chan struct{}), + stopc: make(chan struct{}), + downc: make(chan struct{}), + donec: make(chan struct{}), + updateAddrsC: make(chan notifyMsg), + hostPort2ep: getHostPort2ep(eps), + } + close(sb.downc) + go sb.updateNotifyLoop() + return sb +} + +func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *simpleBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *simpleBalancer) ready() <-chan struct{} { return b.readyc } + +func (b *simpleBalancer) endpoint(hostPort string) string { + b.mu.Lock() + defer b.mu.Unlock() + return b.hostPort2ep[hostPort] +} + +func (b *simpleBalancer) endpoints() []string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.eps +} + +func (b *simpleBalancer) pinned() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.pinAddr +} + +func getHostPort2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} + +func (b *simpleBalancer) updateAddrs(eps ...string) { + np := getHostPort2ep(eps) + + b.mu.Lock() + + match := len(np) == len(b.hostPort2ep) + for k, v := range np { + if b.hostPort2ep[k] != v { + match = false + break + } + } + if match { + // same endpoints, so no need to update address + b.mu.Unlock() + return + } + + b.hostPort2ep = np + b.addrs, b.eps = eps2addrs(eps), eps + + // updating notifyCh can trigger new connections, + // only update addrs if all connections are down + // or addrs does not include pinAddr. 
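+	// hasAddr reports false for the empty string, so a balancer with no
+	// pinned address always pushes the updated address set.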
+ update := !hasAddr(b.addrs, b.pinAddr) + b.mu.Unlock() + + if update { + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + } +} + +func (b *simpleBalancer) next() { + b.mu.RLock() + downc := b.downc + b.mu.RUnlock() + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + // wait until disconnect so new RPCs are not issued on old connection + select { + case <-downc: + case <-b.stopc: + } +} + +func hasAddr(addrs []grpc.Address, targetAddr string) bool { + for _, addr := range addrs { + if targetAddr == addr.Addr { + return true + } + } + return false +} + +func (b *simpleBalancer) updateNotifyLoop() { + defer close(b.donec) + + for { + b.mu.RLock() + upc, downc, addr := b.upc, b.downc, b.pinAddr + b.mu.RUnlock() + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } + select { + case <-upc: + upc = nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + select { + case <-b.stopc: + return + default: + } + case downc == nil: + b.notifyAddrs(notifyReset) + select { + case <-upc: + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + case upc == nil: + select { + // close connections that are not the pinned address + case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + b.notifyAddrs(notifyReset) + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + } + } +} + +func (b *simpleBalancer) notifyAddrs(msg notifyMsg) { + if msg == notifyNext { + select { + case b.notifyCh <- []grpc.Address{}: + case <-b.stopc: + return + } + } + b.mu.RLock() + addrs := b.addrs + pinAddr := b.pinAddr + downc := b.downc + b.mu.RUnlock() + + var waitDown bool + if pinAddr != "" { + waitDown = true + for _, a := range addrs { + if a.Addr == pinAddr { + waitDown = false + } + } + } + + select { + case b.notifyCh <- addrs: + if waitDown { + select { + case <-downc: + case <-b.stopc: + } + } + case <-b.stopc: + } +} + +func (b *simpleBalancer) Up(addr grpc.Address) func(error) { + f, _ := b.up(addr) + return f +} + +func (b *simpleBalancer) up(addr grpc.Address) (func(error), bool) { + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at application layer. Otherwise, will panic + // if b.upc is already closed. + if b.closed { + return func(err error) {}, false + } + // gRPC might call Up on a stale address. + // Prevent updating pinAddr with a stale address. 
+ if !hasAddr(b.addrs, addr.Addr) { + return func(err error) {}, false + } + if b.pinAddr != "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + } + return func(err error) {}, false + } + // notify waiting Get()s and pin first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + if logger.V(4) { + logger.Infof("clientv3/balancer: pin %q", addr.Addr) + } + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + return func(err error) { + b.mu.Lock() + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" + b.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + } + }, true +} + +func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var ( + addr string + closed bool + ) + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. + if !opts.BlockingWait { + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr == "" { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed. + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr != "" { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *simpleBalancer) Close() error { + b.mu.Lock() + // In case gRPC calls close twice. TODO: remove the checking + // when we are sure that gRPC wont call close twice. + if b.closed { + b.mu.Unlock() + <-b.donec + return nil + } + b.closed = true + close(b.stopc) + b.pinAddr = "" + + // In the case of following scenario: + // 1. upc is not closed; no pinned address + // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks + // 3. client.conn.Close() calls balancer.Close(); closed = true + // 4. 
for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled + // we must close upc so Get() exits from blocking on upc + select { + case <-b.upc: + default: + // terminate all waiting Get()s + close(b.upc) + } + + b.mu.Unlock() + + // wait for updateNotifyLoop to finish + <-b.donec + close(b.notifyCh) + + return nil +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} + +func eps2addrs(eps []string) []grpc.Address { + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + return addrs +} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go new file mode 100644 index 00000000000..bff7d7cc638 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -0,0 +1,528 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") +) + +// Client provides and manages an etcd v3 client session. +type Client struct { + Cluster + KV + Lease + Watcher + Auth + Maintenance + + conn *grpc.ClientConn + dialerrc chan error + + cfg Config + creds *credentials.TransportCredentials + balancer *healthBalancer + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // Username is a user name for authentication. + Username string + // Password is a password for authentication. + Password string + // tokenCred is an instance of WithPerRPCCredentials()'s argument + tokenCred *authTokenCredential +} + +// New creates a new etcdv3 client from a given configuration. +func New(cfg Config) (*Client, error) { + if len(cfg.Endpoints) == 0 { + return nil, ErrNoAvailableEndpoints + } + + return newClient(&cfg) +} + +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + +// NewFromURL creates a new etcdv3 client from a URL. +func NewFromURL(url string) (*Client, error) { + return New(Config{Endpoints: []string{url}}) +} + +// Close shuts down the client's etcd connections. 
+func (c *Client) Close() error { + c.cancel() + c.Watcher.Close() + c.Lease.Close() + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +// Ctx is a context for "out of band" messages (e.g., for sending +// "clean up" message when another context is canceled). It is +// canceled on client Close(). +func (c *Client) Ctx() context.Context { return c.ctx } + +// Endpoints lists the registered endpoints for the client. +func (c *Client) Endpoints() (eps []string) { + // copy the slice; protect original endpoints from being changed + eps = make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return +} + +// SetEndpoints updates client's endpoints. +func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + c.cfg.Endpoints = eps + c.mu.Unlock() + c.balancer.updateAddrs(eps...) +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. +func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + logger.Println("Auto sync endpoints failed:", err) + } + } + } +} + +type authTokenCredential struct { + token string + tokenMu *sync.RWMutex +} + +func (cred authTokenCredential) RequireTransportSecurity() bool { + return false +} + +func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + cred.tokenMu.RLock() + defer cred.tokenMu.RUnlock() + return map[string]string{ + "token": cred.token, + }, nil +} + +func parseEndpoint(endpoint string) (proto string, host string, scheme string) { + proto = "tcp" + host = endpoint + url, uerr := url.Parse(endpoint) + if uerr != nil || !strings.Contains(endpoint, "://") { + return + } + scheme = url.Scheme + + // strip scheme:// prefix since grpc dials by host + host = url.Host + switch url.Scheme { + case "http", "https": + case "unix", "unixs": + proto = "unix" + host = url.Host + url.Path + default: + proto, host = "", "" + } + return +} + +func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { + creds = c.creds + switch scheme { + case "unix": + case "http": + creds = nil + case "https", "unixs": + if creds != nil { + break + } + tlsconfig := &tls.Config{} + emptyCreds := credentials.NewTLS(tlsconfig) + creds = &emptyCreds + default: + creds = nil + } + return +} + +// dialSetupOpts gives the dial opts prior to any authentication +func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) { + if c.cfg.DialTimeout > 0 { + opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} + } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) 
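+	// the dialer defined next maps the host handed out by the balancer back
+	// to its full endpoint, so unix/unixs and https endpoints dial with the
+	// proper network and address.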
+ + f := func(host string, t time.Duration) (net.Conn, error) { + proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) + if host == "" && endpoint != "" { + // dialing an endpoint not in the balancer; use + // endpoint passed into dial + proto, host, _ = parseEndpoint(endpoint) + } + if proto == "" { + return nil, fmt.Errorf("unknown scheme for %q", host) + } + select { + case <-c.ctx.Done(): + return nil, c.ctx.Err() + default: + } + dialer := &net.Dialer{Timeout: t} + conn, err := dialer.DialContext(c.ctx, proto, host) + if err != nil { + select { + case c.dialerrc <- err: + default: + } + } + return conn, err + } + opts = append(opts, grpc.WithDialer(f)) + + creds := c.creds + if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { + creds = c.processCreds(scheme) + } + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(*creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + return opts +} + +// Dial connects to a single endpoint using the client's config. +func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) { + return c.dial(endpoint) +} + +func (c *Client) getToken(ctx context.Context) error { + var err error // return last error in a case of fail + var auth *authenticator + + for i := 0; i < len(c.cfg.Endpoints); i++ { + endpoint := c.cfg.Endpoints[i] + host := getHost(endpoint) + // use dial options without dopts to avoid reusing the client balancer + auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) + if err != nil { + continue + } + defer auth.close() + + var resp *AuthenticateResponse + resp, err = auth.authenticate(ctx, c.Username, c.Password) + if err != nil { + continue + } + + c.tokenCred.tokenMu.Lock() + c.tokenCred.token = resp.Token + c.tokenCred.tokenMu.Unlock() + + return nil + } + + return err +} + +func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { + opts := c.dialSetupOpts(endpoint, dopts...) + host := getHost(endpoint) + if c.Username != "" && c.Password != "" { + c.tokenCred = &authTokenCredential{ + tokenMu: &sync.RWMutex{}, + } + + ctx := c.ctx + if c.cfg.DialTimeout > 0 { + cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) + defer cancel() + ctx = cctx + } + + err := c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + return nil, err + } + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) + } + } + + opts = append(opts, c.cfg.DialOptions...) + + conn, err := grpc.DialContext(c.ctx, host, opts...) + if err != nil { + return nil, err + } + return conn, nil +} + +// WithRequireLeader requires client requests to only succeed +// when the cluster has a leader. 
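+// A minimal illustrative sketch (not upstream documentation; `cli` is an
+// assumed connected *Client):
+//
+//	ctx := clientv3.WithRequireLeader(context.Background())
+//	resp, err := cli.Get(ctx, "sample_key")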
+func WithRequireLeader(ctx context.Context) context.Context { + md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, md) +} + +func newClient(cfg *Config) (*Client, error) { + if cfg == nil { + cfg = &Config{} + } + var creds *credentials.TransportCredentials + if cfg.TLS != nil { + c := credentials.NewTLS(cfg.TLS) + creds = &c + } + + // use a temporary skeleton client to bootstrap first connection + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) + client := &Client{ + conn: nil, + dialerrc: make(chan error, 1), + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + } + if cfg.Username != "" && cfg.Password != "" { + client.Username = cfg.Username + client.Password = cfg.Password + } + + sb := newSimpleBalancer(cfg.Endpoints) + hc := func(ep string) (bool, error) { return grpcHealthCheck(client, ep) } + client.balancer = newHealthBalancer(sb, cfg.DialTimeout, hc) + + // use Endpoints[0] so that for https:// without any tls config given, then + // grpc will assume the certificate server name is the endpoint host. + conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) + if err != nil { + client.cancel() + client.balancer.Close() + return nil, err + } + client.conn = conn + + // wait for a connection + if cfg.DialTimeout > 0 { + hasConn := false + waitc := time.After(cfg.DialTimeout) + select { + case <-client.balancer.ready(): + hasConn = true + case <-ctx.Done(): + case <-waitc: + } + if !hasConn { + err := context.DeadlineExceeded + select { + case err = <-client.dialerrc: + default: + } + client.cancel() + client.balancer.Close() + conn.Close() + return nil, err + } + } + + client.Cluster = NewCluster(client) + client.KV = NewKV(client) + client.Lease = NewLease(client) + client.Watcher = NewWatcher(client) + client.Auth = NewAuth(client) + client.Maintenance = NewMaintenance(client) + + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + + go client.autoSync() + return client, nil +} + +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + errc := make(chan error, len(c.cfg.Endpoints)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + wg.Add(len(c.cfg.Endpoints)) + for _, ep := range c.cfg.Endpoints { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + maj, _ = strconv.Atoi(vs[0]) + min, rerr = strconv.Atoi(vs[1]) + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for i := 0; i < len(c.cfg.Endpoints); i++ { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + +// ActiveConnection returns the current in-use connection +func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } + +// isHaltErr returns true if the given error and context indicate no forward +// progress can be made, even after reconnecting. 
+func isHaltErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return true + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + // Treat Internal codes as if something failed, leaving the + // system in an inconsistent state, but retrying could make progress. + // (e.g., failed in middle of send, corrupted frame) + // TODO: are permanent Internal errors possible from grpc? + return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + ev, _ := status.FromError(err) + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + case codes.Unavailable: + case codes.FailedPrecondition: + err = grpc.ErrClientConnClosing + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go new file mode 100644 index 00000000000..8beba58a67b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -0,0 +1,92 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse +) + +type Cluster interface { + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. + MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + + // MemberUpdate updates the peer addresses of the member. 
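+	// An illustrative sketch (assumed names; not upstream documentation):
+	//
+	//	_, err := cli.MemberUpdate(ctx, memberID, []string{"http://10.0.0.5:2380"})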
+ MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) +} + +type cluster struct { + remote pb.ClusterClient +} + +func NewCluster(c *Client) Cluster { + return &cluster{remote: RetryClusterClient(c)} +} + +func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster { + return &cluster{remote: remote} +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + r := &pb.MemberAddRequest{PeerURLs: peerAddrs} + resp, err := c.remote.MemberAdd(ctx, r) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go new file mode 100644 index 00000000000..41e80c1da5d --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -0,0 +1,51 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +// CompactOp represents a compact operation. +type CompactOp struct { + revision int64 + physical bool +} + +// CompactOption configures compact operation. +type CompactOption func(*CompactOp) + +func (op *CompactOp) applyCompactOpts(opts []CompactOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpCompact wraps slice CompactOption to create a CompactOp. +func OpCompact(rev int64, opts ...CompactOption) CompactOp { + ret := CompactOp{revision: rev} + ret.applyCompactOpts(opts) + return ret +} + +func (op CompactOp) toRequest() *pb.CompactionRequest { + return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} +} + +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. 
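+// An illustrative sketch (assumed names; `cli` is a connected *Client and
+// `rev` a revision taken from a previous response header):
+//
+//	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())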
+func WithCompactPhysical() CompactOption { + return func(op *CompactOp) { op.physical = true } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go new file mode 100644 index 00000000000..b5f0a255279 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -0,0 +1,140 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +type CompareTarget int +type CompareResult int + +const ( + CompareVersion CompareTarget = iota + CompareCreated + CompareModified + CompareValue +) + +type Cmp pb.Compare + +func Compare(cmp Cmp, result string, v interface{}) Cmp { + var r pb.Compare_CompareResult + + switch result { + case "=": + r = pb.Compare_EQUAL + case "!=": + r = pb.Compare_NOT_EQUAL + case ">": + r = pb.Compare_GREATER + case "<": + r = pb.Compare_LESS + default: + panic("Unknown result op") + } + + cmp.Result = r + switch cmp.Target { + case pb.Compare_VALUE: + val, ok := v.(string) + if !ok { + panic("bad compare value") + } + cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} + case pb.Compare_VERSION: + cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} + case pb.Compare_CREATE: + cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} + case pb.Compare_MOD: + cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} + case pb.Compare_LEASE: + cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)} + default: + panic("Unknown compare type") + } + return cmp +} + +func Value(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} +} + +func Version(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} +} + +func CreateRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} +} + +func ModRevision(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_MOD} +} + +// LeaseValue compares a key's LeaseID to a value of your choosing. The empty +// LeaseID is 0, otherwise known as `NoLease`. +func LeaseValue(key string) Cmp { + return Cmp{Key: []byte(key), Target: pb.Compare_LEASE} +} + +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// WithRange sets the comparison to scan the range [key, end). 
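+// An illustrative sketch (assumed names): guard a transaction on every key
+// in ["k1", "k3") having value "v":
+//
+//	cmp := clientv3.Compare(clientv3.Value("k1"), "=", "v").WithRange("k3")
+//	resp, err := cli.Txn(ctx).If(cmp).Then(clientv3.OpPut("flag", "ok")).Commit()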
+func (cmp Cmp) WithRange(end string) Cmp { + cmp.RangeEnd = []byte(end) + return cmp +} + +// WithPrefix sets the comparison to scan all keys prefixed by the key. +func (cmp Cmp) WithPrefix() Cmp { + cmp.RangeEnd = getPrefix(cmp.Key) + return cmp +} + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. +func mustInt64(val interface{}) int64 { + if v, ok := val.(int64); ok { + return v + } + if v, ok := val.(int); ok { + return int64(v) + } + panic("bad value") +} + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go new file mode 100644 index 00000000000..dcdbf511d1b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go @@ -0,0 +1,17 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package concurrency implements concurrency operations on top of +// etcd such as distributed locks, barriers, and elections. +package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go new file mode 100644 index 00000000000..e18a0ed4ad9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go @@ -0,0 +1,245 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "errors" + "fmt" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +var ( + ErrElectionNotLeader = errors.New("election: not leader") + ErrElectionNoLeader = errors.New("election: no leader") +) + +type Election struct { + session *Session + + keyPrefix string + + leaderKey string + leaderRev int64 + leaderSession *Session + hdr *pb.ResponseHeader +} + +// NewElection returns a new election on a given key prefix. +func NewElection(s *Session, pfx string) *Election { + return &Election{session: s, keyPrefix: pfx + "/"} +} + +// ResumeElection initializes an election with a known leader. 
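+// An illustrative sketch (assumed names; `s` is an existing *Session and the
+// leader key/revision come from a previous Campaign):
+//
+//	e := concurrency.ResumeElection(s, "my-election", leaderKey, leaderRev)
+//	err := e.Proclaim(ctx, "updated-value")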
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+	return &Election{
+		session:       s,
+		keyPrefix:     pfx,
+		leaderKey:     leaderKey,
+		leaderRev:     leaderRev,
+		leaderSession: s,
+	}
+}
+
+// Campaign puts a value as eligible for the election. It blocks until
+// it is elected, an error occurs, or the context is canceled.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+	s := e.session
+	client := e.session.Client()
+
+	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+	txn = txn.Else(v3.OpGet(k))
+	resp, err := txn.Commit()
+	if err != nil {
+		return err
+	}
+	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+	if !resp.Succeeded {
+		kv := resp.Responses[0].GetResponseRange().Kvs[0]
+		e.leaderRev = kv.CreateRevision
+		if string(kv.Value) != val {
+			if err = e.Proclaim(ctx, val); err != nil {
+				e.Resign(ctx)
+				return err
+			}
+		}
+	}
+
+	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+	if err != nil {
+		// clean up in case of context cancel
+		select {
+		case <-ctx.Done():
+			e.Resign(client.Ctx())
+		default:
+			e.leaderSession = nil
+		}
+		return err
+	}
+	e.hdr = resp.Header
+
+	return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+	if e.leaderSession == nil {
+		return ErrElectionNotLeader
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	txn := client.Txn(ctx).If(cmp)
+	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+	tresp, terr := txn.Commit()
+	if terr != nil {
+		return terr
+	}
+	if !tresp.Succeeded {
+		e.leaderKey = ""
+		return ErrElectionNotLeader
+	}
+
+	e.hdr = tresp.Header
+	return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+	if e.leaderSession == nil {
+		return nil
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+	if err == nil {
+		e.hdr = resp.Header
+	}
+	e.leaderKey = ""
+	e.leaderSession = nil
+	return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+	client := e.session.Client()
+	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+	if err != nil {
+		return nil, err
+	} else if len(resp.Kvs) == 0 {
+		// no leader currently elected
+		return nil, ErrElectionNoLeader
+	}
+	return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values on every current elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+	retc := make(chan v3.GetResponse)
+	go e.observe(ctx, retc)
+	return retc
+}
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+	client := e.session.Client()
+
+	defer close(ch)
+	for {
+		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+		if err != nil {
+			return
+		}
+
+		var kv *mvccpb.KeyValue
+		var hdr *pb.ResponseHeader
+
+		if len(resp.Kvs) == 0 {
+			cctx, cancel := context.WithCancel(ctx)
+			// wait for first key put on prefix
+			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+			wch := client.Watch(cctx, e.keyPrefix, opts...)
+			for kv == nil {
+				wr, ok := <-wch
+				if !ok || wr.Err() != nil {
+					cancel()
+					return
+				}
+				// only accept puts; a delete will make observe() spin
+				for _, ev := range wr.Events {
+					if ev.Type == mvccpb.PUT {
+						hdr, kv = &wr.Header, ev.Kv
+						// may have multiple revs; hdr.rev = the last rev
+						// set to kv's rev in case batch has multiple Puts
+						hdr.Revision = kv.ModRevision
+						break
+					}
+				}
+			}
+			cancel()
+		} else {
+			hdr, kv = resp.Header, resp.Kvs[0]
+		}
+
+		select {
+		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+		case <-ctx.Done():
+			return
+		}
+
+		cctx, cancel := context.WithCancel(ctx)
+		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+		keyDeleted := false
+		for !keyDeleted {
+			wr, ok := <-wch
+			if !ok {
+				cancel()
+				return
+			}
+			for _, ev := range wr.Events {
+				if ev.Type == mvccpb.DELETE {
+					keyDeleted = true
+					break
+				}
+				resp.Header = &wr.Header
+				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+				select {
+				case ch <- *resp:
+				case <-cctx.Done():
+					cancel()
+					return
+				}
+			}
+		}
+		cancel()
+	}
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
new file mode 100644
index 00000000000..4b6e399bd4e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"fmt"
+
+	v3 "github.com/coreos/etcd/clientv3"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc/mvccpb"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wr v3.WatchResponse
+	wch := client.Watch(cctx, key, v3.WithRev(rev))
+	for wr = range wch {
+		for _, ev := range wr.Events {
+			if ev.Type == mvccpb.DELETE {
+				return nil
+			}
+		}
+	}
+	if err := wr.Err(); err != nil {
+		return err
+	}
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and created
+// at a revision no greater than maxCreateRev have been deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { + getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) + for { + resp, err := client.Get(ctx, pfx, getOpts...) + if err != nil { + return nil, err + } + if len(resp.Kvs) == 0 { + return resp.Header, nil + } + lastKey := string(resp.Kvs[0].Key) + if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go new file mode 100644 index 00000000000..dac9ba5a2a3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -0,0 +1,118 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "fmt" + "sync" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +) + +// Mutex implements the sync Locker interface with etcd +type Mutex struct { + s *Session + + pfx string + myKey string + myRev int64 + hdr *pb.ResponseHeader +} + +func NewMutex(s *Session, pfx string) *Mutex { + return &Mutex{s, pfx + "/", "", -1, nil} +} + +// Lock locks the mutex with a cancelable context. If the context is canceled +// while trying to acquire the lock, the mutex tries to clean its stale lock entry. +func (m *Mutex) Lock(ctx context.Context) error { + s := m.s + client := m.s.Client() + + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // put self in lock waiters via myKey; oldest waiter holds lock + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // reuse key in case this session already holds the lock + get := v3.OpGet(m.myKey) + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) 
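+	// a single txn either creates myKey and reads the current owner (If
+	// branch) or, when this session already wrote myKey, rereads it along
+	// with the owner (Else branch)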
+ resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() + if err != nil { + return err + } + m.myRev = resp.Header.Revision + if !resp.Succeeded { + m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision + } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } + + // wait for deletion revisions prior to myKey + hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) + // release lock key if cancelled + select { + case <-ctx.Done(): + m.Unlock(client.Ctx()) + default: + m.hdr = hdr + } + return werr +} + +func (m *Mutex) Unlock(ctx context.Context) error { + client := m.s.Client() + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return nil +} + +func (m *Mutex) IsOwner() v3.Cmp { + return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) +} + +func (m *Mutex) Key() string { return m.myKey } + +// Header is the response header received from etcd on acquiring the lock. +func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } + +type lockerMutex struct{ *Mutex } + +func (lm *lockerMutex) Lock() { + client := lm.s.Client() + if err := lm.Mutex.Lock(client.Ctx()); err != nil { + panic(err) + } +} +func (lm *lockerMutex) Unlock() { + client := lm.s.Client() + if err := lm.Mutex.Unlock(client.Ctx()); err != nil { + panic(err) + } +} + +// NewLocker creates a sync.Locker backed by an etcd mutex. +func NewLocker(s *Session, pfx string) sync.Locker { + return &lockerMutex{NewMutex(s, pfx)} +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go new file mode 100644 index 00000000000..c399d64a61d --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go @@ -0,0 +1,141 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "time" + + v3 "github.com/coreos/etcd/clientv3" +) + +const defaultSessionTTL = 60 + +// Session represents a lease kept alive for the lifetime of a client. +// Fault-tolerant applications may use sessions to reason about liveness. +type Session struct { + client *v3.Client + opts *sessionOptions + id v3.LeaseID + + cancel context.CancelFunc + donec <-chan struct{} +} + +// NewSession gets the leased session for a client. 
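+// An illustrative sketch (assumed names; `cli` is a connected *clientv3.Client):
+//
+//	s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer s.Close()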
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} + for _, opt := range opts { + opt(ops) + } + + id := ops.leaseID + if id == v3.NoLease { + resp, err := client.Grant(ops.ctx, int64(ops.ttl)) + if err != nil { + return nil, err + } + id = v3.LeaseID(resp.ID) + } + + ctx, cancel := context.WithCancel(ops.ctx) + keepAlive, err := client.KeepAlive(ctx, id) + if err != nil || keepAlive == nil { + cancel() + return nil, err + } + + donec := make(chan struct{}) + s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + + // keep the lease alive until client error or cancelled context + go func() { + defer close(donec) + for range keepAlive { + // eat messages until keep alive channel closes + } + }() + + return s, nil +} + +// Client is the etcd client that is attached to the session. +func (s *Session) Client() *v3.Client { + return s.client +} + +// Lease is the lease ID for keys bound to the session. +func (s *Session) Lease() v3.LeaseID { return s.id } + +// Done returns a channel that closes when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Done() <-chan struct{} { return s.donec } + +// Orphan ends the refresh for the session lease. This is useful +// in case the state of the client connection is indeterminate (revoke +// would fail) or when transferring lease ownership. +func (s *Session) Orphan() { + s.cancel() + <-s.donec +} + +// Close orphans the session and revokes the session lease. +func (s *Session) Close() error { + s.Orphan() + // if revoke takes longer than the ttl, lease is expired anyway + ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) + _, err := s.client.Revoke(ctx, s.id) + cancel() + return err +} + +type sessionOptions struct { + ttl int + leaseID v3.LeaseID + ctx context.Context +} + +// SessionOption configures Session. +type SessionOption func(*sessionOptions) + +// WithTTL configures the session's TTL in seconds. +// If TTL is <= 0, the default 60 seconds TTL will be used. +func WithTTL(ttl int) SessionOption { + return func(so *sessionOptions) { + if ttl > 0 { + so.ttl = ttl + } + } +} + +// WithLease specifies the existing leaseID to be used for the session. +// This is useful in process restart scenario, for example, to reclaim +// leadership from an election prior to restart. +func WithLease(leaseID v3.LeaseID) SessionOption { + return func(so *sessionOptions) { + so.leaseID = leaseID + } +} + +// WithContext assigns a context to the session instead of defaulting to +// using the client context. This is useful for canceling NewSession and +// Close operations immediately without having to close the client. If the +// context is canceled before Close() completes, the session's lease will be +// abandoned and left to expire instead of being revoked. +func WithContext(ctx context.Context) SessionOption { + return func(so *sessionOptions) { + so.ctx = ctx + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go new file mode 100644 index 00000000000..d11023ebe36 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -0,0 +1,387 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "math"
+
+ v3 "github.com/coreos/etcd/clientv3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+ // Get returns the value for a key and inserts the key in the txn's read set.
+ // If Get fails, it aborts the transaction with an error, never returning.
+ Get(key ...string) string
+ // Put adds a value for a key to the write set.
+ Put(key, val string, opts ...v3.OpOption)
+ // Rev returns the revision of a key in the read set.
+ Rev(key string) int64
+ // Del deletes a key.
+ Del(key string)
+
+ // commit attempts to apply the txn's changes to the server.
+ commit() *v3.TxnResponse
+ reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+ // SerializableSnapshot provides serializable isolation and also checks
+ // for write conflicts.
+ SerializableSnapshot Isolation = iota
+ // Serializable reads within the same transaction attempt return data
+ // from the revision of the first read.
+ Serializable
+ // RepeatableReads reads within the same transaction attempt always
+ // return the same data.
+ RepeatableReads
+ // ReadCommitted reads keys from any committed revision.
+ ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+ iso Isolation
+ ctx context.Context
+ prefetch []string
+}
+
+type stmOption func(*stmOptions)
+
+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+ return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+ return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+ return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+ opts := &stmOptions{ctx: c.Ctx()}
+ for _, f := range so {
+ f(opts)
+ }
+ if len(opts.prefetch) != 0 {
+ f := apply
+ apply = func(s STM) error {
+ s.Get(opts.prefetch...)
+ return f(s)
+ }
+ }
+ return runSTM(mkSTM(c, opts), apply)
+}
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+ switch opts.iso {
+ case SerializableSnapshot:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp {
+ return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+ } + return s + case Serializable: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case RepeatableReads: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case ReadCommitted: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return nil } + return s + default: + panic("unsupported stm") + } +} + +type stmResponse struct { + resp *v3.TxnResponse + err error +} + +func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { + outc := make(chan stmResponse, 1) + go func() { + defer func() { + if r := recover(); r != nil { + e, ok := r.(stmError) + if !ok { + // client apply panicked + panic(r) + } + outc <- stmResponse{nil, e.err} + } + }() + var out stmResponse + for { + s.reset() + if out.err = apply(s); out.err != nil { + break + } + if out.resp = s.commit(); out.resp != nil { + break + } + } + outc <- out + }() + r := <-outc + return r.resp, r.err +} + +// stm implements repeatable-read software transactional memory over etcd +type stm struct { + client *v3.Client + ctx context.Context + // rset holds read key values and revisions + rset readSet + // wset holds overwritten keys and their values + wset writeSet + // getOpts are the opts used for gets + getOpts []v3.OpOption + // conflicts computes the current conflicts on the txn + conflicts func() []v3.Cmp +} + +type stmPut struct { + val string + op v3.Op +} + +type readSet map[string]*v3.GetResponse + +func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { + for i, resp := range txnresp.Responses { + rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) + } +} + +// first returns the store revision from the first fetch +func (rs readSet) first() int64 { + ret := int64(math.MaxInt64 - 1) + for _, resp := range rs { + if rev := resp.Header.Revision; rev < ret { + ret = rev + } + } + return ret +} + +// cmps guards the txn from updates to read set +func (rs readSet) cmps() []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(rs)) + for k, rk := range rs { + cmps = append(cmps, isKeyCurrent(k, rk)) + } + return cmps +} + +type writeSet map[string]stmPut + +func (ws writeSet) get(keys ...string) *stmPut { + for _, key := range keys { + if wv, ok := ws[key]; ok { + return &wv + } + } + return nil +} + +// cmps returns a cmp list testing no writes have happened past rev +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + return cmps +} + +// puts is the list of ops for all pending writes +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + return puts +} + +func (s *stm) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + return respToValue(s.fetch(keys...)) +} + +func (s *stm) Put(key, val string, opts ...v3.OpOption) { + s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} +} + +func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } + +func (s *stm) Rev(key string) int64 { + if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { + return resp.Kvs[0].ModRevision + } + return 0 +} + +func (s *stm) commit() *v3.TxnResponse { + txnresp, err := 
s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + return nil +} + +func (s *stm) fetch(keys ...string) *v3.GetResponse { + if len(keys) == 0 { + return nil + } + ops := make([]v3.Op, len(keys)) + for i, key := range keys { + if resp, ok := s.rset[key]; ok { + return resp + } + ops[i] = v3.OpGet(key, s.getOpts...) + } + txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() + if err != nil { + panic(stmError{err}) + } + s.rset.add(keys, txnresp) + return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) +} + +func (s *stm) reset() { + s.rset = make(map[string]*v3.GetResponse) + s.wset = make(map[string]stmPut) +} + +type stmSerializable struct { + stm + prefetch map[string]*v3.GetResponse +} + +func (s *stmSerializable) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + firstRead := len(s.rset) == 0 + for _, key := range keys { + if resp, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = resp + } + } + resp := s.stm.fetch(keys...) + if firstRead { + // txn's base revision is defined by the first read + s.getOpts = []v3.OpOption{ + v3.WithRev(resp.Header.Revision), + v3.WithSerializable(), + } + } + return respToValue(resp) +} + +func (s *stmSerializable) Rev(key string) int64 { + s.Get(key) + return s.stm.Rev(key) +} + +func (s *stmSerializable) gets() ([]string, []v3.Op) { + keys := make([]string, 0, len(s.rset)) + ops := make([]v3.Op, 0, len(s.rset)) + for k := range s.rset { + keys = append(keys, k) + ops = append(ops, v3.OpGet(k)) + } + return keys, ops +} + +func (s *stmSerializable) commit() *v3.TxnResponse { + keys, getops := s.gets() + txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) + // use Else to prefetch keys in case of conflict to save a round trip + txnresp, err := txn.Else(getops...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + // load prefetch with Else data + s.rset.add(keys, txnresp) + s.prefetch = s.rset + s.getOpts = nil + return nil +} + +func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { + if len(r.Kvs) != 0 { + return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) + } + return v3.Compare(v3.ModRevision(k), "=", 0) +} + +func respToValue(resp *v3.GetResponse) string { + if resp == nil || len(resp.Kvs) == 0 { + return "" + } + return string(resp.Kvs[0].Value) +} + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. 
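+// Prefer NewSTM with WithIsolation(ReadCommitted). An apply function sketch
+// (cli is an assumed *v3.Client; keys are illustrative):
+//
+//	apply := func(s STM) error {
+//		s.Put("b", s.Get("a"))
+//		return nil
+//	}
+//	_, err := NewSTM(cli, apply, WithIsolation(ReadCommitted))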
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go new file mode 100644 index 00000000000..87d61cf577d --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -0,0 +1,62 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "time" + + "google.golang.org/grpc" +) + +type Config struct { + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` + + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + + // DialTimeout is the timeout for failing to establish a connection. + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // TLS holds the client secure credentials, if any. + TLS *tls.Config + + // Username is a user name for authentication. + Username string `json:"username"` + + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context +} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go new file mode 100644 index 00000000000..dacc5bb346f --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -0,0 +1,64 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package clientv3 implements the official Go etcd client for v3. +// +// Create client using `clientv3.New`: +// +// cli, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, +// DialTimeout: 5 * time.Second, +// }) +// if err != nil { +// // handle error! +// } +// defer cli.Close() +// +// Make sure to close the client after using it. If the client is not closed, the +// connection will have leaky goroutines. +// +// To specify a client request timeout, wrap the context with context.WithTimeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// resp, err := kvc.Put(ctx, "sample_key", "sample_value") +// cancel() +// if err != nil { +// // handle error! +// } +// // use the response +// +// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. +// Clients are safe for concurrent use by multiple goroutines. +// +// etcd client returns 2 types of errors: +// +// 1. context error: canceled or deadline exceeded. +// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// +// Here is the example code to handle client errors: +// +// resp, err := kvc.Put(ctx, "", "") +// if err != nil { +// if err == context.Canceled { +// // ctx is canceled by another routine +// } else if err == context.DeadlineExceeded { +// // ctx is attached with a deadline and it exceeded +// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { +// // process (verr.Errors) +// } else { +// // bad cluster endpoints, which are not etcd servers +// } +// } +// +package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 00000000000..8f4ba08ae69 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,249 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const minHealthRetryDuration = 3 * time.Second +const unknownService = "unknown service grpc.health.v1.Health" + +type healthCheckFunc func(ep string) (bool, error) + +// healthBalancer wraps a balancer so that it uses health checking +// to choose its endpoints. +type healthBalancer struct { + *simpleBalancer + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + // mu protects addrs, eps, unhealthy map, and stopc. + mu sync.RWMutex + + // addrs stores all grpc addresses associated with the balancer. + addrs []grpc.Address + + // eps stores all client endpoints + eps []string + + // unhealthy tracks the last unhealthy time of endpoints. 
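+ // Entries older than healthCheckTimeout are dropped by updateUnhealthy.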
+ unhealthy map[string]time.Time + + stopc chan struct{} + stopOnce sync.Once + + hostPort2ep map[string]string + + wg sync.WaitGroup +} + +func newHealthBalancer(b *simpleBalancer, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + hb := &healthBalancer{ + simpleBalancer: b, + healthCheck: hc, + eps: b.endpoints(), + addrs: eps2addrs(b.endpoints()), + hostPort2ep: getHostPort2ep(b.endpoints()), + unhealthy: make(map[string]time.Time), + stopc: make(chan struct{}), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy(timeout) + }() + + return hb +} + +func (hb *healthBalancer) Up(addr grpc.Address) func(error) { + f, used := hb.up(addr) + if !used { + return f + } + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error, and retrying until success; + // finding healthy endpoint on retry could take several timeouts and redials. + // To avoid wasting retries, gray-list unhealthy endpoints. + hb.hostPortError(addr.Addr, err) + f(err) + } +} + +func (hb *healthBalancer) up(addr grpc.Address) (func(error), bool) { + if !hb.mayPin(addr) { + return func(err error) {}, false + } + return hb.simpleBalancer.up(addr) +} + +func (hb *healthBalancer) Close() error { + hb.stopOnce.Do(func() { close(hb.stopc) }) + hb.wg.Wait() + return hb.simpleBalancer.Close() +} + +func (hb *healthBalancer) updateAddrs(eps ...string) { + addrs, hostPort2ep := eps2addrs(eps), getHostPort2ep(eps) + hb.mu.Lock() + hb.addrs, hb.eps, hb.hostPort2ep = addrs, eps, hostPort2ep + hb.unhealthy = make(map[string]time.Time) + hb.mu.Unlock() + hb.simpleBalancer.updateAddrs(eps...) +} + +func (hb *healthBalancer) endpoint(host string) string { + hb.mu.RLock() + defer hb.mu.RUnlock() + return hb.hostPort2ep[host] +} + +func (hb *healthBalancer) endpoints() []string { + hb.mu.RLock() + defer hb.mu.RUnlock() + return hb.eps +} + +func (hb *healthBalancer) updateUnhealthy(timeout time.Duration) { + for { + select { + case <-time.After(timeout): + hb.mu.Lock() + for k, v := range hb.unhealthy { + if time.Since(v) > timeout { + delete(hb.unhealthy, k) + if logger.V(4) { + logger.Infof("clientv3/health-balancer: removes %q from unhealthy after %v", k, timeout) + } + } + } + hb.mu.Unlock() + eps := []string{} + for _, addr := range hb.liveAddrs() { + eps = append(eps, hb.endpoint(addr.Addr)) + } + hb.simpleBalancer.updateAddrs(eps...) 
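+ // stopc is closed by Close; stop refreshing the unhealthy map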
+ case <-hb.stopc: + return + } + } +} + +func (hb *healthBalancer) liveAddrs() []grpc.Address { + hb.mu.RLock() + defer hb.mu.RUnlock() + hbAddrs := hb.addrs + if len(hb.addrs) == 1 || len(hb.unhealthy) == 0 || len(hb.unhealthy) == len(hb.addrs) { + return hbAddrs + } + addrs := make([]grpc.Address, 0, len(hb.addrs)-len(hb.unhealthy)) + for _, addr := range hb.addrs { + if _, unhealthy := hb.unhealthy[addr.Addr]; !unhealthy { + addrs = append(addrs, addr) + } + } + return addrs +} + +func (hb *healthBalancer) hostPortError(hostPort string, err error) { + hb.mu.Lock() + if _, ok := hb.hostPort2ep[hostPort]; ok { + hb.unhealthy[hostPort] = time.Now() + if logger.V(4) { + logger.Infof("clientv3/health-balancer: marking %q as unhealthy (%q)", hostPort, err.Error()) + } + } + hb.mu.Unlock() +} + +func (hb *healthBalancer) mayPin(addr grpc.Address) bool { + hb.mu.RLock() + if _, ok := hb.hostPort2ep[addr.Addr]; !ok { // stale host:port + hb.mu.RUnlock() + return false + } + skip := len(hb.addrs) == 1 || len(hb.unhealthy) == 0 || len(hb.addrs) == len(hb.unhealthy) + failedTime, bad := hb.unhealthy[addr.Addr] + dur := hb.healthCheckTimeout + hb.mu.RUnlock() + if skip || !bad { + return true + } + // prevent isolated member's endpoint from being infinitely retried, as follows: + // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. grpc-healthcheck still SERVING, thus retry to pin + // instead, return before grpc-healthcheck if failed within healthcheck timeout + if elapsed := time.Since(failedTime); elapsed < dur { + if logger.V(4) { + logger.Infof("clientv3/health-balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, dur) + } + return false + } + if ok, _ := hb.healthCheck(addr.Addr); ok { + hb.mu.Lock() + delete(hb.unhealthy, addr.Addr) + hb.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/health-balancer: %q is healthy (health check success)", addr.Addr) + } + return true + } + hb.mu.Lock() + hb.unhealthy[addr.Addr] = time.Now() + hb.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/health-balancer: %q becomes unhealthy (health check failed)", addr.Addr) + } + return false +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { + // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go new file mode 100644 index 00000000000..b578d9ebe46 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -0,0 +1,165 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+
+ pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type (
+ CompactResponse pb.CompactionResponse
+ PutResponse pb.PutResponse
+ GetResponse pb.RangeResponse
+ DeleteResponse pb.DeleteRangeResponse
+ TxnResponse pb.TxnResponse
+)
+
+type KV interface {
+ // Put puts a key-value pair into etcd.
+ // Note that key and value can be plain byte arrays; a string is an
+ // immutable representation of a byte array.
+ // To get a string of bytes, do string([]byte{0x10, 0x20}).
+ Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
+
+ // Get retrieves keys.
+ // By default, Get will return the value for "key", if any.
+ // When passed WithRange(end), Get will return the keys in the range [key, end).
+ // When passed WithFromKey(), Get returns keys greater than or equal to key.
+ // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
+ // if the required revision is compacted, the request will fail with ErrCompacted.
+ // When passed WithLimit(limit), the number of returned keys is bounded by limit.
+ // When passed WithSort(), the keys will be sorted.
+ Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
+
+ // Delete deletes a key, or optionally a range [key, end) using WithRange(end).
+ Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
+
+ // Compact compacts etcd KV history before the given rev.
+ Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
+
+ // Do applies a single Op on KV without a transaction.
+ // Do is useful when creating arbitrary operations to be issued at a
+ // later time; the user can range over the operations, calling Do to
+ // execute them. Get/Put/Delete, on the other hand, are best suited
+ // for when the operation should be issued at the time of declaration.
+ Do(ctx context.Context, op Op) (OpResponse, error)
+
+ // Txn creates a transaction.
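+ // A compare-and-swap sketch (kv is an assumed KV; key and values are
+ // illustrative):
+ //
+ //	resp, err := kv.Txn(ctx).
+ //		If(Compare(Value("k"), "=", "old")).
+ //		Then(OpPut("k", "new")).
+ //		Commit()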
+ Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient +} + +func NewKV(c *Client) KV { + return &kv{remote: RetryKVClient(c)} +} + +func NewKVFromKVClient(remote pb.KVClient) KV { + return &kv{remote: remote} +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, toErr(ctx, err) +} + +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest()) + if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r) + if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest()) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("Unknown op") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go new file mode 100644 index 00000000000..aa9ea2d78aa --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -0,0 +1,560 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + "time" + + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. +type LeaseLeasesResponse struct { + *pb.ResponseHeader + Leases []LeaseStatus `json:"leases"` +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // a small buffer to store unsent lease responses. + leaseResponseChSize = 16 + // NoLease is a lease ID for the absence of a lease. + NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + // Grant creates a new lease. + Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + + // Revoke revokes the given lease. + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + + // Leases retrieves all leases. + Leases(ctx context.Context) (*LeaseLeasesResponse, error) + + // KeepAlive keeps the given lease alive forever. + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + + // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive + // should be used instead of KeepAliveOnce. 
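+ // A KeepAlive consumption sketch (lease is an assumed Lease; the lease ID
+ // is granted elsewhere):
+ //
+ //	ch, err := lease.KeepAlive(ctx, id)
+ //	for ka := range ch {
+ //		_ = ka.TTL // channel closes on expiry, revoke, or cancel
+ //	}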
+ KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + + // Close releases all resources Lease keeps for efficient communication + // with the etcd server. + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. + firstKeepAliveOnce sync.Once +} + +// keepAlive multiplexes a keepalive for a lease over multiple channels +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. + donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + r := toLeaseTimeToLiveRequest(id, opts...) 
+ resp, err := l.remote.LeaseTimeToLive(ctx, r)
+ if err == nil {
+ gresp := &LeaseTimeToLiveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ GrantedTTL: resp.GrantedTTL,
+ Keys: resp.Keys,
+ }
+ return gresp, nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+ resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{})
+ if err == nil {
+ leases := make([]LeaseStatus, len(resp.Leases))
+ for i := range resp.Leases {
+ leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+ }
+ return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+ ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
+
+ l.mu.Lock()
+ // ensure that recvKeepAliveLoop is still running
+ select {
+ case <-l.donec:
+ err := l.loopErr
+ l.mu.Unlock()
+ close(ch)
+ return ch, ErrKeepAliveHalted{Reason: err}
+ default:
+ }
+ ka, ok := l.keepAlives[id]
+ if !ok {
+ // create fresh keep alive
+ ka = &keepAlive{
+ chs: []chan<- *LeaseKeepAliveResponse{ch},
+ ctxs: []context.Context{ctx},
+ deadline: time.Now().Add(l.firstKeepAliveTimeout),
+ nextKeepAlive: time.Now(),
+ donec: make(chan struct{}),
+ }
+ l.keepAlives[id] = ka
+ } else {
+ // add channel and context to existing keep alive
+ ka.ctxs = append(ka.ctxs, ctx)
+ ka.chs = append(ka.chs, ch)
+ }
+ l.mu.Unlock()
+
+ go l.keepAliveCtxCloser(id, ctx, ka.donec)
+ l.firstKeepAliveOnce.Do(func() {
+ go l.recvKeepAliveLoop()
+ go l.deadlineLoop()
+ })
+
+ return ch, nil
+}
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+ for {
+ resp, err := l.keepAliveOnce(ctx, id)
+ if err == nil {
+ if resp.TTL <= 0 {
+ err = rpctypes.ErrLeaseNotFound
+ }
+ return resp, err
+ }
+ if isHaltErr(ctx, err) {
+ return nil, toErr(ctx, err)
+ }
+ }
+}
+
+func (l *lessor) Close() error {
+ l.stopCancel()
+ // close for synchronous teardown if stream goroutines never launched
+ l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+ <-l.donec
+ return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
+ select {
+ case <-donec:
+ return
+ case <-l.donec:
+ return
+ case <-ctx.Done():
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ ka, ok := l.keepAlives[id]
+ if !ok {
+ return
+ }
+
+ // close channel and remove context if still associated with keep alive
+ for i, c := range ka.ctxs {
+ if c == ctx {
+ close(ka.chs[i])
+ ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+ ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+ break
+ }
+ }
+ // remove the keepalive if there are no more listeners
+ if len(ka.chs) == 0 {
+ delete(l.keepAlives, id)
+ }
+}
+
+// closeRequireLeader scans keepAlives for ctxs that have require leader
+// and closes the associated channels.
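+// Such contexts are built with WithRequireLeader; a sketch:
+//
+//	ctx := WithRequireLeader(context.Background())
+//	ch, _ := lease.KeepAlive(ctx, id) // ch closes if the member loses its leader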
+func (l *lessor) closeRequireLeader() {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ for _, ka := range l.keepAlives {
+ reqIdxs := 0
+ // find all required leader channels, close, mark as nil
+ for i, ctx := range ka.ctxs {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ continue
+ }
+ ks := md[rpctypes.MetadataRequireLeaderKey]
+ if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+ continue
+ }
+ close(ka.chs[i])
+ ka.chs[i] = nil
+ reqIdxs++
+ }
+ if reqIdxs == 0 {
+ continue
+ }
+ // remove all channels that required a leader from keepalive
+ newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+ newCtxs := make([]context.Context, len(newChs))
+ newIdx := 0
+ for i := range ka.chs {
+ if ka.chs[i] == nil {
+ continue
+ }
+ newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+ newIdx++
+ }
+ ka.chs, ka.ctxs = newChs, newCtxs
+ }
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ stream, err := l.remote.LeaseKeepAlive(cctx)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+
+ err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+
+ resp, rerr := stream.Recv()
+ if rerr != nil {
+ return nil, toErr(ctx, rerr)
+ }
+
+ karesp := &LeaseKeepAliveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ }
+ return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+ defer func() {
+ l.mu.Lock()
+ close(l.donec)
+ l.loopErr = gerr
+ for _, ka := range l.keepAlives {
+ ka.close()
+ }
+ l.keepAlives = make(map[LeaseID]*keepAlive)
+ l.mu.Unlock()
+ }()
+
+ for {
+ stream, err := l.resetRecv()
+ if err != nil {
+ if canceledByCaller(l.stopCtx, err) {
+ return err
+ }
+ } else {
+ for {
+ resp, err := stream.Recv()
+ if err != nil {
+ if canceledByCaller(l.stopCtx, err) {
+ return err
+ }
+
+ if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+ l.closeRequireLeader()
+ }
+ break
+ }
+
+ l.recvKeepAlive(resp)
+ }
+ }
+
+ select {
+ case <-time.After(retryConnWait):
+ continue
+ case <-l.stopCtx.Done():
+ return l.stopCtx.Err()
+ }
+ }
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
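+// Any previously opened stream is canceled first so only one send loop is active.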
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { + sctx, cancel := context.WithCancel(l.stopCtx) + stream, err := l.remote.LeaseKeepAlive(sctx) + if err != nil { + cancel() + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stream != nil && l.streamCancel != nil { + l.streamCancel() + } + + l.streamCancel = cancel + l.stream = stream + + go l.sendKeepAliveLoop(stream) + return stream, nil +} + +// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse +func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[karesp.ID] + if !ok { + return + } + + if karesp.TTL <= 0 { + // lease expired; close all keep alive channels + delete(l.keepAlives, karesp.ID) + ka.close() + return + } + + // send update to all channels + nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) + ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) + for _, ch := range ka.chs { + select { + case ch <- karesp: + ka.nextKeepAlive = nextKeepAlive + default: + } + } +} + +// deadlineLoop reaps any keep alive channels that have not received a response +// within the lease TTL +func (l *lessor) deadlineLoop() { + for { + select { + case <-time.After(time.Second): + case <-l.donec: + return + } + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.deadline.Before(now) { + // waited too long for response; lease may be expired + ka.close() + delete(l.keepAlives, id) + } + } + l.mu.Unlock() + } +} + +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. +func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { + for { + var tosend []LeaseID + + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.nextKeepAlive.Before(now) { + tosend = append(tosend, id) + } + } + l.mu.Unlock() + + for _, id := range tosend { + r := &pb.LeaseKeepAliveRequest{ID: int64(id)} + if err := stream.Send(r); err != nil { + // TODO do something with this error? + return + } + } + + select { + case <-time.After(500 * time.Millisecond): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + } +} + +func (ka *keepAlive) close() { + close(ka.donec) + for _, ch := range ka.chs { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go new file mode 100644 index 00000000000..784c395c765 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -0,0 +1,95 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "io/ioutil" + "sync" + + "google.golang.org/grpc/grpclog" +) + +// Logger is the logger used by client library. +// It implements grpclog.LoggerV2 interface. 
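+// Logs are discarded by default; a sketch of re-enabling them (the writers
+// are illustrative):
+//
+//	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))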
+type Logger grpclog.LoggerV2 + +var ( + logger settableLogger +) + +type settableLogger struct { + l grpclog.LoggerV2 + mu sync.RWMutex +} + +func init() { + // disable client side logs by default + logger.mu.Lock() + logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) + + // logger has to override the grpclog at initialization so that + // any changes to the grpclog go through logger with locking + // instead of through SetLogger + // + // now updates only happen through settableLogger.set + grpclog.SetLoggerV2(&logger) + logger.mu.Unlock() +} + +// SetLogger sets client-side Logger. By default, logs are disabled. +func SetLogger(l Logger) { + logger.set(l) +} + +// GetLogger returns the current logger. +func GetLogger() Logger { + return logger.get() +} + +func (s *settableLogger) set(l Logger) { + s.mu.Lock() + logger.l = l + s.mu.Unlock() +} + +func (s *settableLogger) get() Logger { + s.mu.RLock() + l := logger.l + s.mu.RUnlock() + return l +} + +// implement the grpclog.LoggerV2 interface + +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go new file mode 100644 index 00000000000..25abc9c9100 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -0,0 +1,205 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package clientv3
+
+import (
+ "context"
+ "io"
+
+ pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+type (
+ DefragmentResponse pb.DefragmentResponse
+ AlarmResponse pb.AlarmResponse
+ AlarmMember pb.AlarmMember
+ StatusResponse pb.StatusResponse
+ HashKVResponse pb.HashKVResponse
+ MoveLeaderResponse pb.MoveLeaderResponse
+)
+
+type Maintenance interface {
+ // AlarmList gets all active alarms.
+ AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+ // AlarmDisarm disarms a given alarm.
+ AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+ // Defragment releases wasted space from internal fragmentation on a given etcd member.
+ // Defragment is only needed when a large number of keys have been deleted and the
+ // user wants to reclaim the freed resources.
+ // Defragment is an expensive operation. Users should avoid defragmenting multiple
+ // members at the same time.
+ // To defragment multiple members in the cluster, the user needs to call Defragment
+ // multiple times with different endpoints.
+ Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+ // Status gets the status of the endpoint.
+ Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+ // HashKV returns a hash of the KV state at the time of the RPC.
+ // If revision is zero, the hash is computed on all keys. If the revision
+ // is non-zero, the hash is computed on all keys at or below the given revision.
+ HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
+ // Snapshot provides a reader for a point-in-time snapshot of etcd.
+ Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+ // MoveLeader requests the current leader to transfer its leadership to the transferee.
+ // Request must be made to the leader.
+ MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+}
+
+type maintenance struct {
+ dial func(endpoint string) (pb.MaintenanceClient, func(), error)
+ remote pb.MaintenanceClient
+}
+
+func NewMaintenance(c *Client) Maintenance {
+ return &maintenance{
+ dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+ conn, err := c.dial(endpoint)
+ if err != nil {
+ return nil, nil, err
+ }
+ cancel := func() { conn.Close() }
+ return RetryMaintenanceClient(c, conn), cancel, nil
+ },
+ remote: RetryMaintenanceClient(c, c.conn),
+ }
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
+ return &maintenance{
+ dial: func(string) (pb.MaintenanceClient, func(), error) {
+ return remote, func() {}, nil
+ },
+ remote: remote,
+ }
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+ req := &pb.AlarmRequest{
+ Action: pb.AlarmRequest_GET,
+ MemberID: 0, // all
+ Alarm: pb.AlarmType_NONE, // all
+ }
+ resp, err := m.remote.Alarm(ctx, req)
+ if err == nil {
+ return (*AlarmResponse)(resp), nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+ req := &pb.AlarmRequest{
+ Action: pb.AlarmRequest_DEACTIVATE,
+ MemberID: am.MemberID,
+ Alarm: am.Alarm,
+ }
+
+ if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+ ar, err := m.AlarmList(ctx)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ ret := AlarmResponse{}
+ for _, am := range ar.Alarms {
+ dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+ if derr != nil {
+ return nil, toErr(ctx, derr)
+ }
+ ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+ } + return &ret, nil + } + + resp, err := m.remote.Alarm(ctx, req) + if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}) + if err != nil { + return nil, toErr(ctx, err) + } + return (*DefragmentResponse)(resp), nil +} + +func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}) + if err != nil { + return nil, toErr(ctx, err) + } + return (*StatusResponse)(resp), nil +} + +func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}) + if err != nil { + return nil, toErr(ctx, err) + } + return (*HashKVResponse)(resp), nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}) + if err != nil { + return nil, toErr(ctx, err) + } + + pr, pw := io.Pipe() + go func() { + for { + resp, err := ss.Recv() + if err != nil { + pw.CloseWithError(err) + return + } + if resp == nil && err == nil { + break + } + if _, werr := pw.Write(resp.Blob); werr != nil { + pw.CloseWithError(werr) + return + } + } + pw.Close() + }() + return pr, nil +} + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}) + return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go new file mode 100644 index 00000000000..c6ec5bf5200 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -0,0 +1,513 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + +type opType int + +const ( + // A default Op has opType 0, which is invalid. + tRange opType = iota + 1 + tPut + tDeleteRange + tTxn +) + +var ( + noPrefixEnd = []byte{0} +) + +// Op represents an Operation that kv can execute. 
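+// Ops are built with OpGet/OpPut/OpDelete and issued via KV.Do or a
+// transaction. A sketch (kv is an assumed KV; key and value are illustrative):
+//
+//	resp, err := kv.Do(ctx, OpPut("sample_key", "sample_value"))
+//	if err == nil {
+//		_ = resp.Put() // typed response for the put
+//	}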
+type Op struct {
+	t   opType
+	key []byte
+	end []byte
+
+	// for range
+	limit        int64
+	sort         *SortOption
+	serializable bool
+	keysOnly     bool
+	countOnly    bool
+	minModRev    int64
+	maxModRev    int64
+	minCreateRev int64
+	maxCreateRev int64
+
+	// for range, watch
+	rev int64
+
+	// for watch, put, delete
+	prevKV bool
+
+	// for put
+	ignoreValue bool
+	ignoreLease bool
+
+	// progressNotify is for progress updates.
+	progressNotify bool
+	// createdNotify is for created event
+	createdNotify bool
+	// filters for watchers
+	filterPut    bool
+	filterDelete bool
+
+	// for put
+	val     []byte
+	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
+}
+
+// accessors / mutators
+
+func (op Op) IsTxn() bool              { return op.t == tTxn }
+func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
+
+// KeyBytes returns the byte slice holding the Op's key.
+func (op Op) KeyBytes() []byte { return op.key }
+
+// WithKeyBytes sets the byte slice for the Op's key.
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns whether the serializable field is set.
+func (op Op) IsSerializable() bool { return op.serializable }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
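+// For example, a hedged sketch (illustrative only):
+//
+//	op := OpPut("k", "v1")
+//	op.WithValueBytes([]byte("v2")) // op now carries "v2" instead of "v1"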
+func (op *Op) WithValueBytes(v []byte) { op.val = v } + +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + +func (op Op) toRequestOp() *pb.RequestOp { + switch op.t { + case tRange: + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + case tPut: + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + case tDeleteRange: + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} + default: + panic("Unknown Op") + } +} + +func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } + return op.t != tRange +} + +func OpGet(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + return ret +} + +func OpDelete(key string, opts ...OpOption) Op { + ret := Op{t: tDeleteRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in delete") + case ret.limit != 0: + panic("unexpected limit in delete") + case ret.rev != 0: + panic("unexpected revision in delete") + case ret.sort != nil: + panic("unexpected sort in delete") + case ret.serializable: + panic("unexpected serializable in delete") + case ret.countOnly: + panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") + } + return ret +} + +func OpPut(key, val string, opts ...OpOption) Op { + ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret.applyOpts(opts) + switch { + case ret.end != nil: + panic("unexpected range in put") + case ret.limit != 0: + panic("unexpected limit in put") + case ret.rev != 0: + panic("unexpected revision in put") + case ret.sort != nil: + panic("unexpected sort in put") + case ret.serializable: + 
panic("unexpected serializable in put") + case ret.countOnly: + panic("unexpected countOnly in put") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") + } + return ret +} + +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + +func opWatch(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in watch") + case ret.limit != 0: + panic("unexpected limit in watch") + case ret.sort != nil: + panic("unexpected sort in watch") + case ret.serializable: + panic("unexpected serializable in watch") + case ret.countOnly: + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") + } + return ret +} + +func (op *Op) applyOpts(opts []OpOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpOption configures Operations like Get, Put, Delete. +type OpOption func(*Op) + +// WithLease attaches a lease ID to a key in 'Put' request. +func WithLease(leaseID LeaseID) OpOption { + return func(op *Op) { op.leaseID = leaseID } +} + +// WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. +func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } + +// WithRev specifies the store revision for 'Get' request. +// Or the start revision of 'Watch' request. +func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } + +// WithSort specifies the ordering in 'Get' request. It requires +// 'WithRange' and/or 'WithPrefix' to be specified too. +// 'target' specifies the target to sort by: key, version, revisions, value. +// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. +func WithSort(target SortTarget, order SortOrder) OpOption { + return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. + order = SortNone + } + op.sort = &SortOption{target, order} + } +} + +// GetPrefixRangeEnd gets the range end of the prefix. +// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +func GetPrefixRangeEnd(prefix string) string { + return string(getPrefix([]byte(prefix))) +} + +func getPrefix(key []byte) []byte { + end := make([]byte, len(key)) + copy(end, key) + for i := len(end) - 1; i >= 0; i-- { + if end[i] < 0xff { + end[i] = end[i] + 1 + end = end[:i+1] + return end + } + } + // next prefix does not exist (e.g., 0xffff); + // default to WithFromKey policy + return noPrefixEnd +} + +// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate +// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' +// can return 'foo1', 'foo2', and so on. 
+func WithPrefix() OpOption {
+	return func(op *Op) {
+		if len(op.key) == 0 {
+			op.key, op.end = []byte{0}, []byte{0}
+			return
+		}
+		op.end = getPrefix(op.key)
+	}
+}
+
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
+// For example, 'Get' requests with 'WithRange(end)' return
+// the keys in the range [key, end).
+// endKey must be lexicographically greater than start key.
+func WithRange(endKey string) OpOption {
+	return func(op *Op) { op.end = []byte(endKey) }
+}
+
+// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
+// to be equal or greater than the key in the argument.
+func WithFromKey() OpOption { return WithRange("\x00") }
+
+// WithSerializable makes 'Get' request serializable. By default,
+// it's linearizable. Serializable requests are better for lower latency
+// requirements.
+func WithSerializable() OpOption {
+	return func(op *Op) { op.serializable = true }
+}
+
+// WithKeysOnly makes the 'Get' request return only the keys; the
+// corresponding values are omitted.
+func WithKeysOnly() OpOption {
+	return func(op *Op) { op.keysOnly = true }
+}
+
+// WithCountOnly makes the 'Get' request return only the count of keys.
+func WithCountOnly() OpOption {
+	return func(op *Op) { op.countOnly = true }
+}
+
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the get's prefix given a sort order
+func withTop(target SortTarget, order SortOrder) []OpOption {
+	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
+
+// WithProgressNotify makes watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
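+//
+// A hedged sketch (illustrative only; cli is a *Client, which embeds Watcher):
+//
+//	wch := cli.Watch(context.Background(), "foo", WithPrefix(), WithProgressNotify())
+//	// responses for which IsProgressNotify() is true carry no events, only
+//	// the current revision in the header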
+func WithProgressNotify() OpOption {
	return func(op *Op) {
		op.progressNotify = true
	}
}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+	return func(op *Op) {
+		op.prevKV = true
+	}
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+	return func(op *Op) {
+		op.ignoreValue = true
+	}
+}
+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+} diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go new file mode 100644 index 00000000000..c6ef585b5b4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import "context"
+
+// TODO: remove this when "FailFast=false" is fixed.
+// See https://github.com/grpc/grpc-go/issues/1532.
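+// readyWait blocks until the balancer signals a ready connection (the ready
+// channel closes), or until either the per-RPC or the client-wide context is
+// done, in which case it returns the corresponding context error.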
+func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go new file mode 100644 index 00000000000..e6d17d0320e --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -0,0 +1,495 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type rpcFunc func(ctx context.Context) error +type retryRPCFunc func(context.Context, rpcFunc) error +type retryStopErrFunc func(error) bool + +func isRepeatableStopError(err error) bool { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + return true + } + // only retry if unavailable + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable +} + +func isNonRepeatableStopError(err error) bool { + ev, _ := status.FromError(err) + if ev.Code() != codes.Unavailable { + return true + } + return rpctypes.ErrorDesc(err) != "there is no address available" +} + +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { + return func(rpcCtx context.Context, f rpcFunc) error { + for { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { + return err + } + pinned := c.balancer.pinned() + err := f(rpcCtx) + if err == nil { + return nil + } + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) + } + + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } + } + + if isStop(err) { + return err + } + } + } +} + +func (c *Client) newAuthRetryWrapper() retryRPCFunc { + return func(rpcCtx context.Context, f rpcFunc) error { + for { + pinned := c.balancer.pinned() + err := f(rpcCtx) + if err == nil { + return nil + } + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } + // always stop retry on etcd errors other than invalid auth token + if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { + gterr := c.getToken(rpcCtx) + if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } + return err // return 
the original error for simplicity + } + continue + } + return err + } + } +} + +// RetryKVClient implements a KVClient. +func RetryKVClient(c *Client) pb.KVClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + conn := pb.NewKVClient(c.conn) + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} + retryAuthWrapper := c.newAuthRetryWrapper() + return &retryKVClient{ + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, + retryAuthWrapper} +} + +type retryKVClient struct { + *nonRepeatableKVClient + repeatableRetry retryRPCFunc +} + +func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc +} + +func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) + return err + }) + return resp, err +} + +type retryLeaseClient struct { + lc pb.LeaseClient + repeatableRetry retryRPCFunc +} + +// RetryLeaseClient implements a LeaseClient. +func RetryLeaseClient(c *Client) pb.LeaseClient { + retry := &retryLeaseClient{ + pb.NewLeaseClient(c.conn), + c.newRetryWrapper(isRepeatableStopError), + } + return &retryLeaseClient{retry, c.newAuthRetryWrapper()} +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseLeases(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) + return err + }) + return resp, err + +} + +func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + +type retryClusterClient struct { + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc +} + +// RetryClusterClient implements a ClusterClient. +func RetryClusterClient(c *Client) pb.ClusterClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} +} + +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) + return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. 
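+// It layers the repeatable retry policy over the non-repeatable one for the
+// pb.MaintenanceClient bound to the given connection: repeatable RPCs such as
+// Status and HashKV are retried on transient failures, while the only
+// non-repeatable RPC, Defragment, stops at the first error.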
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
+	nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
+	mc := pb.NewMaintenanceClient(conn)
+	return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry}
+}
+
+type retryMaintenanceClient struct {
+	*nonRepeatableMaintenanceClient
+	repeatableRetry retryRPCFunc
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Alarm(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Status(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Hash(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.HashKV(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		stream, err = rmc.mc.Snapshot(rctx, in, opts...)
+		return err
+	})
+	return stream, err
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+	err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+type nonRepeatableMaintenanceClient struct {
+	mc                 pb.MaintenanceClient
+	nonRepeatableRetry retryRPCFunc
+}
+
+func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rmc.mc.Defragment(rctx, in, opts...)
+		return err
+	})
+	return resp, err
+}
+
+type retryAuthClient struct {
+	*nonRepeatableAuthClient
+	repeatableRetry retryRPCFunc
+}
+
+// RetryAuthClient implements an AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient {
+	repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
+	nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
+	ac := pb.NewAuthClient(c.conn)
+	return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry}
+}
+
+func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
+	err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
+		resp, err = rac.ac.UserList(rctx, in, opts...)
+ return err + }) + return resp, err +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.Authenticate(rctx, in, opts...) + return err + }) + return resp, err +} diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go new file mode 100644 index 00000000000..2bb9d9a13b7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/sort.go @@ -0,0 +1,37 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +type SortTarget int +type SortOrder int + +const ( + SortNone SortOrder = iota + SortAscend + SortDescend +) + +const ( + SortByKey SortTarget = iota + SortByVersion + SortByCreateRevision + SortByModRevision + SortByValue +) + +type SortOption struct { + Target SortTarget + Order SortOrder +} diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go new file mode 100644 index 00000000000..8169b621509 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -0,0 +1,147 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+// Txn(context.TODO()).If(
+//  Compare(Value(k1), ">", v1),
+//  Compare(Version(k1), "=", 2)
+// ).Then(
+//  OpPut(k2,v2), OpPut(k3,v3)
+// ).Else(
+//  OpPut(k4,v4), OpPut(k5,v5)
+// ).Commit()
+//
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed. Otherwise, the
+	// operations passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+	Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+	kv  *kv
+	ctx context.Context
+
+	mu    sync.Mutex
+	cif   bool
+	cthen bool
+	celse bool
+
+	isWrite bool
+
+	cmps []*pb.Compare
+
+	sus []*pb.RequestOp
+	fas []*pb.RequestOp
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cif {
+		panic("cannot call If twice!")
+	}
+
+	if txn.cthen {
+		panic("cannot call If after Then!")
+	}
+
+	if txn.celse {
+		panic("cannot call If after Else!")
+	}
+
+	txn.cif = true
+
+	for i := range cs {
+		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+	}
+
+	return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cthen {
+		panic("cannot call Then twice!")
+	}
+	if txn.celse {
+		panic("cannot call Then after Else!")
+	}
+
+	txn.cthen = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.sus = append(txn.sus, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.celse {
+		panic("cannot call Else twice!")
+	}
+
+	txn.celse = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.fas = append(txn.fas, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+	resp, err := txn.kv.remote.Txn(txn.ctx, r)
+	if err != nil {
+		return nil, toErr(txn.ctx, err)
+	}
+	return (*TxnResponse)(resp), nil
+} diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go new file mode 100644 index 00000000000..91e6db26a03 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -0,0 +1,806 @@ +// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "fmt" + "sync" + "time" + + v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const ( + EventTypeDelete = mvccpb.DELETE + EventTypePut = mvccpb.PUT + + closeSendErrTimeout = 250 * time.Millisecond +) + +type Event mvccpb.Event + +type WatchChan <-chan WatchResponse + +type Watcher interface { + // Watch watches on a key or prefix. The watched events will be returned + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. + Watch(ctx context.Context, key string, opts ...OpOption) WatchChan + + // Close closes the watcher and cancels all watch requests. + Close() error +} + +type WatchResponse struct { + Header pb.ResponseHeader + Events []*Event + + // CompactRevision is the minimum revision the watcher may receive. + CompactRevision int64 + + // Canceled is used to indicate watch failure. + // If the watch failed and the stream was about to close, before the channel is closed, + // the channel sends a final response that has Canceled set to true with a non-nil Err(). + Canceled bool + + // Created is used to indicate the creation of the watcher. + Created bool + + closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string +} + +// IsCreate returns true if the event tells that the key is newly created. +func (e *Event) IsCreate() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision +} + +// IsModify returns true if the event tells that a new value is put on existing key. +func (e *Event) IsModify() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision +} + +// Err is the error value if this WatchResponse holds an error. +func (wr *WatchResponse) Err() error { + switch { + case wr.closeErr != nil: + return v3rpc.Error(wr.closeErr) + case wr.CompactRevision != 0: + return v3rpc.ErrCompacted + case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason)) + } + return v3rpc.ErrFutureRev + } + return nil +} + +// IsProgressNotify returns true if the WatchResponse is progress notification. +func (wr *WatchResponse) IsProgressNotify() bool { + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 +} + +// watcher implements the Watcher interface +type watcher struct { + remote pb.WatchClient + + // mu protects the grpc streams map + mu sync.RWMutex + + // streams holds all the active grpc streams keyed by ctx value. + streams map[string]*watchGrpcStream +} + +// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
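+// A single grpc stream multiplexes many logical watchers ("substreams"); the
+// run() goroutine owns the stream and routes requests, responses, and
+// teardown between the channels below.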
+type watchGrpcStream struct { + owner *watcher + remote pb.WatchClient + + // ctx controls internal remote.Watch requests + ctx context.Context + // ctxKey is the key used when looking up this stream's context + ctxKey string + cancel context.CancelFunc + + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream + + // reqc sends a watch request from Watch() to the main goroutine + reqc chan *watchRequest + // respc receives data from the watch client + respc chan *pb.WatchResponse + // donec closes to broadcast shutdown + donec chan struct{} + // errc transmits errors from grpc Recv to the watch stream reconnect logic + errc chan error + // closingc gets the watcherStream of closing watchers + closingc chan *watcherStream + // wg is Done when all substream goroutines have exited + wg sync.WaitGroup + + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream + closeErr error +} + +// watchRequest is issued by the subscriber to start a new watcher +type watchRequest struct { + ctx context.Context + key string + end string + rev int64 + // send created notification event if this field is true + createdNotify bool + // progressNotify is for progress updates + progressNotify bool + // filters is the list of events to filter out + filters []pb.WatchCreateRequest_FilterType + // get the previous key-value pair before the event happens + prevKV bool + // retc receives a chan WatchResponse once the watcher is established + retc chan chan WatchResponse +} + +// watcherStream represents a registered watcher +type watcherStream struct { + // initReq is the request that initiated this request + initReq watchRequest + + // outc publishes watch responses to subscriber + outc chan WatchResponse + // recvc buffers watch responses before publishing + recvc chan *WatchResponse + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. 
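+	// A closing substream is skipped when resuming and is torn down via closingc.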
+ closing bool + // id is the registered watch id on the grpc stream + id int64 + + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse +} + +func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) +} + +func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { + return &watcher{ + remote: wc, + streams: make(map[string]*watchGrpcStream), + } +} + +// never closes +var valCtxCh = make(chan struct{}) +var zeroTime = time.Unix(0, 0) + +// ctx with only the values; never Done +type valCtx struct{ context.Context } + +func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } +func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } +func (vc *valCtx) Err() error { return nil } + +func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { + ctx, cancel := context.WithCancel(&valCtx{inctx}) + wgs := &watchGrpcStream{ + owner: w, + remote: w.remote, + ctx: ctx, + ctxKey: streamKeyFromCtx(inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), + } + go wgs.run() + return wgs +} + +// Watch posts a watch request to run() and waits for a new watcher channel +func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { + ow := opWatch(key, opts...) + + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + + wr := &watchRequest{ + ctx: ctx, + createdNotify: ow.createdNotify, + key: string(ow.key), + end: string(ow.end), + rev: ow.rev, + progressNotify: ow.progressNotify, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), + } + + ok := false + ctxKey := streamKeyFromCtx(ctx) + + // find or allocate appropriate grpc watch stream + w.mu.Lock() + if w.streams == nil { + // closed + w.mu.Unlock() + ch := make(chan WatchResponse) + close(ch) + return ch + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + // couldn't create channel; return closed channel + closeCh := make(chan WatchResponse, 1) + + // submit request + select { + case reqc <- wr: + ok = true + case <-wr.ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) + } + + // receive channel + if ok { + select { + case ret := <-wr.retc: + return ret + case <-ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) 
+ } + } + + close(closeCh) + return closeCh +} + +func (w *watcher) Close() (err error) { + w.mu.Lock() + streams := w.streams + w.streams = nil + w.mu.Unlock() + for _, wgs := range streams { + if werr := wgs.close(); werr != nil { + err = werr + } + } + return err +} + +func (w *watchGrpcStream) close() (err error) { + w.cancel() + <-w.donec + select { + case err = <-w.errc: + default: + } + return toErr(w.ctx, err) +} + +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } + w.mu.Unlock() +} + +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + if resp.WatchId == -1 { + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) + } else if ws.outc != nil { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } + } +} + +// run is the root of the goroutines for managing a watcher client +func (w *watchGrpcStream) run() { + var wc pb.Watch_WatchClient + var closeErr error + + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) + + defer func() { + w.closeErr = closeErr + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + w.wg.Wait() + w.owner.closeStream(w) + }() + + // start a stream with the etcd grpc server + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + + cancelSet := make(map[int64]struct{}) + + for { + select { + // Watch() requested + case wreq := <-w.reqc: + outc := make(chan WatchResponse, 1) + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbuffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + wc.Send(ws.initReq.toPB()) + } + // New events from the watch client + case pbresp := <-w.respc: + switch { + case pbresp.Created: + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } + case pbresp.Canceled && pbresp.CompactRevision == 0: + 
delete(cancelSet, pbresp.WatchId) + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc + close(ws.recvc) + closing[ws] = struct{}{} + } + default: + // dispatch to appropriate watch stream + if ok := w.dispatchEvent(pbresp); ok { + break + } + // watch response on unexpected watch id; cancel id + if _, ok := cancelSet[pbresp.WatchId]; ok { + break + } + cancelSet[pbresp.WatchId] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: pbresp.WatchId, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + wc.Send(req) + } + // watch client failed on Recv; spawn another if possible + case err := <-w.errc: + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + closeErr = err + return + } + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } + cancelSet = make(map[int64]struct{}) + case <-w.ctx.Done(): + return + case ws := <-w.closingc: + w.closeSubstream(ws) + delete(closing, ws) + if len(w.substreams)+len(w.resuming) == 0 { + // no more watchers on this stream, shutdown + return + } + } + } +} + +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. +func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + +// dispatchEvent sends a WatchResponse to the appropriate watcher stream +func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + events := make([]*Event, len(pbresp.Events)) + for i, ev := range pbresp.Events { + events[i] = (*Event)(ev) + } + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true +} + +// serveWatchClient forwards messages from the grpc stream to run() +func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { + for { + resp, err := wc.Recv() + if err != nil { + select { + case w.errc <- err: + case <-w.donec: + } + return + } + select { + case w.respc <- resp: + case <-w.donec: + return + } + } +} + +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + w.wg.Done() + }() + + emptyWr := &WatchResponse{} + for { + curWr := emptyWr + outc := ws.outc + + if len(ws.buf) > 0 { + curWr = ws.buf[0] + } else { + outc = nil + } + select { + case outc <- *curWr: + if ws.buf[0].Err() != nil { + return + } + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel 
+ // and posting duplicate create events + ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr + } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. + if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } + } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision + } + + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 + } + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue + } + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-w.ctx.Done(): + return + case <-ws.initReq.ctx.Done(): + return + case <-resumec: + resuming = true + return + } + } + // lazily send cancel message if events on missing id +} + +func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { + // mark all substreams as resuming + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + // strip out nils, if any + var resuming []*watcherStream + for _, ws := range w.resuming { + if ws != nil { + resuming = append(resuming, ws) + } + } + w.resuming = resuming + w.substreams = make(map[int64]*watcherStream) + + // connect to grpc stream while accepting watcher cancelation + stopc := make(chan struct{}) + donec := w.waitCancelSubstreams(stopc) + wc, err := w.openWatchClient() + close(stopc) + <-donec + + // serve all non-closing streams, even if there's a client error + // so that the teardown path can shutdown the streams as expected. + for _, ws := range w.resuming { + if ws.closing { + continue + } + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + } + + if err != nil { + return nil, v3rpc.Error(err) + } + + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { + var wg sync.WaitGroup + wg.Add(len(w.resuming)) + donec := make(chan struct{}) + for i := range w.resuming { + go func(ws *watcherStream) { + defer wg.Done() + if ws.closing { + if ws.initReq.ctx.Err() != nil && ws.outc != nil { + close(ws.outc) + ws.outc = nil + } + return + } + select { + case <-ws.initReq.ctx.Done(): + // closed ws will be removed from resuming + ws.closing = true + close(ws.outc) + ws.outc = nil + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() + case <-stopc: + } + }(w.resuming[i]) + } + go func() { + defer close(donec) + wg.Wait() + }() + return donec +} + +// joinSubstreams waits for all substream goroutines to complete. +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec + } + } +} + +// openWatchClient retries opening a watch client until success or halt. 
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false +func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { + for { + select { + case <-w.ctx.Done(): + if err == nil { + return nil, w.ctx.Err() + } + return nil, err + default: + } + if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { + break + } + if isHaltErr(w.ctx, err) { + return nil, v3rpc.Error(err) + } + } + return ws, nil +} + +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. +func (wr *watchRequest) toPB() *pb.WatchRequest { + req := &pb.WatchCreateRequest{ + StartRevision: wr.rev, + Key: []byte(wr.key), + RangeEnd: []byte(wr.end), + ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, + } + cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go new file mode 100644 index 00000000000..f72c6a644f3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. +package rpctypes diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go new file mode 100644 index 00000000000..446e4f6b870 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -0,0 +1,212 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
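
The vendored watch.go above multiplexes many logical watchers over a single gRPC stream: run() owns the stream, dispatchEvent routes responses by watch ID, and serveSubstream tracks nextRev so that a reconnect resumes right after the last delivered event. For orientation, here is a minimal sketch (not part of the vendored files) of the consumer-facing clientv3 API this machinery backs; the endpoint address is an assumption:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Assumed local endpoint; adjust for a real cluster.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// On transient disconnects the library reopens the gRPC stream and
	// resumes from the last delivered revision, so no events are lost;
	// that is the nextRev bookkeeping in serveSubstream at work.
	for wresp := range cli.Watch(context.Background(), "foo") {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q (mod rev %d)\n",
				ev.Type, ev.Kv.Key, ev.Kv.Value, ev.Kv.ModRevision)
		}
	}
}
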
+ +package rpctypes + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// server-side error +var ( + ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() + ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() + ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() + ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() + ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() + ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() + ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() + ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() + ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + + ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() + ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + + ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() + ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() + ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() + ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() + ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + + ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() + ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() + + ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err() + ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err() + ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err() + ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err() + ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() + ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() + ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() + ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() + ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() + ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() + ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() + ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() + ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() + ErrGRPCInvalidAuthMgmt = 
status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() + + ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() + ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() + ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() + ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() + ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() + ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() + ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() + ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() + ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() + + errStringToError = map[string]error{ + ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + + ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + + ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + + ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, + ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, + ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, + ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, + ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + + ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, + + ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, + ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, + ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, + ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, + ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, + ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, + ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, + ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, + ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, + ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, + ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, + + ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, + ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, + ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + } +) + +// client-side error +var ( + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = 
Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) + + ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) + ErrLeaseExist = Error(ErrGRPCLeaseExist) + + ErrMemberExist = Error(ErrGRPCMemberExist) + ErrPeerURLExist = Error(ErrGRPCPeerURLExist) + ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) + ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) + ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + + ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) + + ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) + ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) + ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) + ErrUserEmpty = Error(ErrGRPCUserEmpty) + ErrUserNotFound = Error(ErrGRPCUserNotFound) + ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) + ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrAuthFailed = Error(ErrGRPCAuthFailed) + ErrPermissionDenied = Error(ErrGRPCPermissionDenied) + ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) + ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) + + ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotLeader = Error(ErrGRPCNotLeader) + ErrNotCapable = Error(ErrGRPCNotCapable) + ErrStopped = Error(ErrGRPCStopped) + ErrTimeout = Error(ErrGRPCTimeout) + ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) + ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) + ErrUnhealthy = Error(ErrGRPCUnhealthy) + ErrCorrupt = Error(ErrGRPCCorrupt) +) + +// EtcdError defines gRPC server errors. +// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) +type EtcdError struct { + code codes.Code + desc string +} + +// Code returns grpc/codes.Code. +// TODO: define clientv3/codes.Code. +func (e EtcdError) Code() codes.Code { + return e.code +} + +func (e EtcdError) Error() string { + return e.desc +} + +func Error(err error) error { + if err == nil { + return nil + } + verr, ok := errStringToError[ErrorDesc(err)] + if !ok { // not gRPC error + return err + } + ev, ok := status.FromError(verr) + var desc string + if ok { + desc = ev.Message() + } else { + desc = verr.Error() + } + return EtcdError{code: ev.Code(), desc: desc} +} + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go new file mode 100644 index 00000000000..5c590e1aec9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go @@ -0,0 +1,20 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
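
The Error and ErrorDesc helpers above translate raw gRPC status errors into the typed client-side sentinels by looking the status message up in errStringToError. A self-contained sketch (not part of the vendored files) of that translation; the message string is one of the server-side values defined above:

package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A wire-level gRPC error, as the etcd server would produce it.
	gerr := status.New(codes.OutOfRange,
		"etcdserver: mvcc: required revision has been compacted").Err()

	// Error maps the message to the typed EtcdError, so callers can
	// compare against sentinels with plain equality (EtcdError is a
	// comparable struct).
	cerr := rpctypes.Error(gerr)
	fmt.Println(cerr == rpctypes.ErrCompacted) // true

	// Messages that are not in the table pass through unchanged.
	other := status.New(codes.Internal, "something else").Err()
	fmt.Println(rpctypes.Error(other) == other) // true
}
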
+// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + MetadataRequireLeaderKey = "hasleader" + MetadataHasLeader = "true" +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go new file mode 100644 index 00000000000..e4007f5de73 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -0,0 +1,1052 @@ +// Code generated by protoc-gen-gogo. +// source: etcdserver.proto +// DO NOT EDIT! + +/* + Package etcdserverpb is a generated protocol buffer package. + + It is generated from these files: + etcdserver.proto + raft_internal.proto + rpc.proto + + It has these top-level messages: + Request + Metadata + RequestHeader + InternalRaftRequest + EmptyResponse + InternalAuthenticateRequest + ResponseHeader + RangeRequest + RangeResponse + PutRequest + PutResponse + DeleteRangeRequest + DeleteRangeResponse + RequestOp + ResponseOp + Compare + TxnRequest + TxnResponse + CompactionRequest + CompactionResponse + HashRequest + HashKVRequest + HashKVResponse + HashResponse + SnapshotRequest + SnapshotResponse + WatchRequest + WatchCreateRequest + WatchCancelRequest + WatchResponse + LeaseGrantRequest + LeaseGrantResponse + LeaseRevokeRequest + LeaseRevokeResponse + LeaseKeepAliveRequest + LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse + LeaseLeasesRequest + LeaseStatus + LeaseLeasesResponse + Member + MemberAddRequest + MemberAddResponse + MemberRemoveRequest + MemberRemoveResponse + MemberUpdateRequest + MemberUpdateResponse + MemberListRequest + MemberListResponse + DefragmentRequest + DefragmentResponse + MoveLeaderRequest + MoveLeaderResponse + AlarmRequest + AlarmMember + AlarmResponse + StatusRequest + StatusResponse + AuthEnableRequest + AuthDisableRequest + AuthenticateRequest + AuthUserAddRequest + AuthUserGetRequest + AuthUserDeleteRequest + AuthUserChangePasswordRequest + AuthUserGrantRoleRequest + AuthUserRevokeRoleRequest + AuthRoleAddRequest + AuthRoleGetRequest + AuthUserListRequest + AuthRoleListRequest + AuthRoleDeleteRequest + AuthRoleGrantPermissionRequest + AuthRoleRevokePermissionRequest + AuthEnableResponse + AuthDisableResponse + AuthenticateResponse + AuthUserAddResponse + AuthUserGetResponse + AuthUserDeleteResponse + AuthUserChangePasswordResponse + AuthUserGrantRoleResponse + AuthUserRevokeRoleResponse + AuthRoleAddResponse + AuthRoleGetResponse + AuthRoleListResponse + AuthUserListResponse + AuthRoleDeleteResponse + AuthRoleGrantPermissionResponse + AuthRoleRevokePermissionResponse +*/ +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } + +type Metadata struct { + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } + +func init() { + proto.RegisterType((*Request)(nil), "etcdserverpb.Request") + proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") +} +func (m *Request) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + dAtA[i] = 0x1a + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x22 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + dAtA[i] = 0x28 + i++ + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x32 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i += copy(dAtA[i:], m.PrevValue) + dAtA[i] = 0x38 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + if m.PrevExist != nil { + dAtA[i] = 0x40 + i++ + if *m.PrevExist { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x48 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + dAtA[i] = 0x50 + i++ + if m.Wait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + dAtA[i] = 0x60 + i++ 
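+ // The hard-coded bytes in this generated marshaller are protobuf field
+ // tags, computed as (field_number << 3) | wire_type: 0x8 is field 1 as a
+ // varint (ID), 0x12 is field 2 length-delimited (Method), and 0x60 just
+ // above is field 12 as a varint (Recursive).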
+ if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.Sorted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x70 + i++ + if m.Quorum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x78 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Refresh != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if *m.Refresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Request) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.ID)) + l = len(m.Method) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Path) + n += 1 + l + sovEtcdserver(uint64(l)) + l = len(m.Val) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 2 + l = len(m.PrevValue) + n += 1 + l + sovEtcdserver(uint64(l)) + n += 1 + sovEtcdserver(uint64(m.PrevIndex)) + if m.PrevExist != nil { + n += 2 + } + n += 1 + sovEtcdserver(uint64(m.Expiration)) + n += 2 + n += 1 + sovEtcdserver(uint64(m.Since)) + n += 2 + n += 2 + n += 2 + n += 1 + sovEtcdserver(uint64(m.Time)) + n += 3 + if m.Refresh != nil { + n += 3 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metadata) Size() (n int) { + var l int + _ = l + n += 1 + sovEtcdserver(uint64(m.NodeID)) + n += 1 + sovEtcdserver(uint64(m.ClusterID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEtcdserver(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEtcdserver(x uint64) (n int) { + return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break 
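+ // This loop is the standard protobuf base-128 varint decode: each byte
+ // contributes seven low-order bits and a clear high bit marks the last
+ // byte, so the bytes 0xac 0x02, for example, decode to 0x2c|(0x02<<7) = 300.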
+ } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Dir = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEtcdserver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevValue = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) + } + m.PrevIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PrevIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PrevExist = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) + } + m.Expiration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Expiration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Wait = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + m.Since = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Since |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sorted = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Quorum = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + 
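+ // Each field case first validates the wire type against what the schema
+ // expects (0 = varint here); a mismatch fails fast with a descriptive
+ // error rather than silently misreading the payload.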
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stream = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Refresh = &b + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + m.ClusterID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEtcdserver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEtcdserver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEtcdserver(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEtcdserver + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEtcdserver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEtcdserver(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } + +var fileDescriptorEtcdserver = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 
0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto new file mode 100644 index 00000000000..25e0aca5d9f --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto @@ -0,0 +1,34 @@ +syntax = "proto2"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message Request { + optional uint64 ID = 1 [(gogoproto.nullable) = false]; + optional string Method = 2 [(gogoproto.nullable) = false]; + optional string Path = 3 [(gogoproto.nullable) = false]; + optional string Val = 4 [(gogoproto.nullable) = false]; + optional bool Dir = 5 [(gogoproto.nullable) = false]; + optional string PrevValue = 6 [(gogoproto.nullable) = false]; + optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; + optional bool PrevExist = 8 [(gogoproto.nullable) = true]; + optional int64 Expiration = 9 [(gogoproto.nullable) = false]; + optional bool Wait = 10 [(gogoproto.nullable) = false]; + optional uint64 Since = 11 [(gogoproto.nullable) = false]; + optional bool Recursive = 12 [(gogoproto.nullable) = false]; + optional bool Sorted = 13 [(gogoproto.nullable) = false]; + optional bool Quorum = 14 [(gogoproto.nullable) = false]; + optional int64 Time = 15 [(gogoproto.nullable) = false]; + optional bool Stream = 16 [(gogoproto.nullable) = false]; + optional bool Refresh = 17 [(gogoproto.nullable) = true]; +} + +message Metadata { + optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; + optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go new file mode 100644 index 00000000000..44a3b6f69eb --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -0,0 +1,2094 @@ +// Code generated by protoc-gen-gogo. +// source: raft_internal.proto +// DO NOT EDIT! + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
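
Before the message definitions, a quick round-trip sketch (not part of the generated file) of what these gogo-generated Marshal/Unmarshal methods provide, using the RequestHeader message defined just below:

package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	in := &pb.RequestHeader{ID: 42, Username: "root", AuthRevision: 7}

	// Marshal is the reflection-free fast path generated by
	// protoc-gen-gogo; the output is standard protobuf wire format.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	out := &pb.RequestHeader{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.ID, out.Username, out.AuthRevision) // 42 root 7
}
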
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RequestHeader struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // username is a username that is associated with an auth token of gRPC connection + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +type InternalRaftRequest struct { + Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` + LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest 
`protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } + +type EmptyResponse struct { +} + +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// For avoiding misusage the field, we have an internal version of AuthenticateRequest. +type InternalAuthenticateRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + // simple_token is generated in API layer (etcdserver/v3_server.go) + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` +} + +func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } +func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*InternalAuthenticateRequest) ProtoMessage() {} +func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { + return fileDescriptorRaftInternal, []int{3} +} + +func init() { + proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") + proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") + proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") + proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") +} +func (m *RequestHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if len(m.Username) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + } + if m.AuthRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) + } + if m.V2 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) + n1, err := m.V2.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.DeleteRange != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) + n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Txn != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) + n5, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Compaction != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) + n6, err := m.Compaction.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.LeaseGrant != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) + n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.LeaseRevoke != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) + n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Alarm != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) + n9, err := m.Alarm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Header != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x6 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) + n10, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.AuthEnable != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x3e + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) + n11, err := m.AuthEnable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.AuthDisable != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) + n12, err := m.AuthDisable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Authenticate != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) + n13, err := m.Authenticate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.AuthUserAdd != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) + n14, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.AuthUserDelete != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, 
uint64(m.AuthUserDelete.Size())) + n15, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.AuthUserGet != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) + n16, err := m.AuthUserGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.AuthUserChangePassword != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) + n17, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.AuthUserGrantRole != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) + n18, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.AuthUserRevokeRole != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) + n19, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.AuthUserList != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) + n20, err := m.AuthUserList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.AuthRoleList != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) + n21, err := m.AuthRoleList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.AuthRoleAdd != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) + n22, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.AuthRoleDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) + n23, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.AuthRoleGet != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) + n24, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.AuthRoleGrantPermission != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) + n25, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.AuthRoleRevokePermission != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) + n26, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + return i, nil +} + +func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + if len(m.SimpleToken) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i += copy(dAtA[i:], m.SimpleToken) + } + return i, nil +} + +func encodeFixed64RaftInternal(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32RaftInternal(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RequestHeader) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRevision != 0 { + n += 1 + sovRaftInternal(uint64(m.AuthRevision)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaftInternal(uint64(m.ID)) + } + if m.V2 != nil { + l = m.V2.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Put != nil { + l = m.Put.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.DeleteRange != nil { + l = m.DeleteRange.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Txn != nil { + l = m.Txn.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Compaction != nil { + l = m.Compaction.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseGrant != nil { + l = m.LeaseGrant.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.LeaseRevoke != nil { + l = m.LeaseRevoke.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Alarm != nil { + l = m.Alarm.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } + if m.Header != nil { + l = m.Header.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthEnable != nil { + l = m.AuthEnable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthDisable != nil { + l = m.AuthDisable.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.Authenticate != nil { + l = m.Authenticate.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserAdd != nil { + l = m.AuthUserAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserDelete != nil { + l = m.AuthUserDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGet != nil { + l = m.AuthUserGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserChangePassword != nil { + l = m.AuthUserChangePassword.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserGrantRole != nil { + l = m.AuthUserGrantRole.Size() + 
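+ // Field numbers above 15 need a multi-byte tag varint (two bytes up
+ // through field 2047), hence the "n += 2 + l + ..." accounting here;
+ // Size must mirror MarshalTo exactly so the output buffer is sized precisely.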
n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserRevokeRole != nil { + l = m.AuthUserRevokeRole.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthUserList != nil { + l = m.AuthUserList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleList != nil { + l = m.AuthRoleList.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleAdd != nil { + l = m.AuthRoleAdd.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleDelete != nil { + l = m.AuthRoleDelete.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGet != nil { + l = m.AuthRoleGet.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleGrantPermission != nil { + l = m.AuthRoleGrantPermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + if m.AuthRoleRevokePermission != nil { + l = m.AuthRoleRevokePermission.Size() + n += 2 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func (m *EmptyResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *InternalAuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + l = len(m.SimpleToken) + if l > 0 { + n += 1 + l + sovRaftInternal(uint64(l)) + } + return n +} + +func sovRaftInternal(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaftInternal(x uint64) (n int) { + return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RequestHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AuthRevision |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.V2 == nil { + m.V2 = &Request{} + } + if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &RangeRequest{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Put == nil { + m.Put = &PutRequest{} + } + if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteRange == nil { + m.DeleteRange = &DeleteRangeRequest{} + } + if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Txn == nil { + m.Txn = &TxnRequest{} + } + if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Compaction == nil { + m.Compaction = &CompactionRequest{} + } + if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseGrant == nil { + m.LeaseGrant = &LeaseGrantRequest{} + } + if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseRevoke == nil { + m.LeaseRevoke = &LeaseRevokeRequest{} + } + if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Alarm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Alarm == nil { + m.Alarm = &AlarmRequest{} + } + if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &RequestHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthEnable == nil { + m.AuthEnable = &AuthEnableRequest{} + } + if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1011: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthDisable == nil { + m.AuthDisable = &AuthDisableRequest{} + } + if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1012: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Authenticate == nil { + m.Authenticate = &InternalAuthenticateRequest{} + } + if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserAdd == nil { + m.AuthUserAdd = &AuthUserAddRequest{} + } + if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserDelete == nil { + m.AuthUserDelete = &AuthUserDeleteRequest{} + } + if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGet == nil { + m.AuthUserGet = &AuthUserGetRequest{} + } + if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1103: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserChangePassword == nil { + m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} + } + if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1104: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserGrantRole == nil { + m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} + } + if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1105: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserRevokeRole == nil { + m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} + } + if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1106: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthUserList == nil { + m.AuthUserList = &AuthUserListRequest{} + } + if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1107: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleList == nil { + m.AuthRoleList = &AuthRoleListRequest{} + } + if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1200: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleAdd == nil { + m.AuthRoleAdd = &AuthRoleAddRequest{} + } + if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1201: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleDelete == nil { + m.AuthRoleDelete = &AuthRoleDeleteRequest{} + } + if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 1202: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGet == nil { + m.AuthRoleGet = &AuthRoleGetRequest{} + } + if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1203: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleGrantPermission == nil { + m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} + } + if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1204: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthRoleRevokePermission == nil { + m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} + } + if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaftInternal + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SimpleToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaftInternal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaftInternal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaftInternal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaftInternal + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaftInternal(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } + +var fileDescriptorRaftInternal = []byte{ + // 837 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, + 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, + 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, + 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, + 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, + 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, + 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, + 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, + 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, + 0x76, 0xbb, 0x8e, 0x4b, 0x68, 0xaa, 0x13, 0x88, 0x96, 0x6d, 0x20, 0xd9, 0xc5, 0xcc, 0x4a, 0xce, + 0x88, 0xde, 0xf1, 0x32, 0x9a, 0xa6, 0x1d, 0xde, 0x32, 0x19, 0x74, 0xed, 0xc0, 0xf6, 0x5c, 0x32, + 0x2a, 0xcb, 0x0a, 0x22, 0x68, 0xe8, 0x58, 0xe5, 0x4f, 0x11, 0xcd, 0x6e, 0xeb, 0xa9, 0x0d, 0x7a, + 0xc8, 0x75, 0xbb, 0x81, 0x46, 0xd7, 0x50, 0xb6, 0x5b, 0x95, 0x2d, 0xf2, 0xd5, 0xf9, 0xd5, 0xde, + 0x75, 0xad, 0xea, 0x12, 0x23, 0xdb, 0xad, 0xe2, 0xfb, 0x68, 0x9c, 0x51, 0xb7, 0x09, 0xb2, 0x57, + 0xbe, 0x5a, 0xea, 0x53, 0x8a, 0x54, 0x28, 0x57, 0x42, 0x7c, 0x0b, 0x8d, 0xfa, 0x1d, 0x4e, 0xc6, + 0xa4, 0x9e, 0x24, 0xf5, 0x7b, 0x9d, 0x70, 0x1e, 0x43, 0x88, 0xf0, 0x3a, 0x2a, 0x58, 0xe0, 0x00, + 0x07, 0x53, 0x35, 0x19, 0x97, 0x45, 0x8b, 0xc9, 0xa2, 0xba, 0x54, 0x24, 0x5a, 0xe5, 0xad, 0x38, + 0x26, 0x1a, 0xf2, 0x63, 0x97, 0x4c, 0xa4, 0x35, 0xdc, 0x3f, 0x76, 0xa3, 0x86, 0xfc, 0xd8, 0xc5, + 0x0f, 0x10, 0x6a, 0x78, 0x6d, 
0x9f, 0x36, 0xb8, 0xf8, 0x7e, 0x93, 0xb2, 0xe4, 0x6a, 0xb2, 0x64, + 0x3d, 0xca, 0x87, 0x95, 0x3d, 0x25, 0xf8, 0x21, 0xca, 0x3b, 0x40, 0x03, 0x30, 0x9b, 0x8c, 0xba, + 0x9c, 0x4c, 0xa5, 0x11, 0x76, 0x84, 0x60, 0x53, 0xe4, 0x23, 0x82, 0x13, 0x85, 0xc4, 0x9a, 0x15, + 0x81, 0x41, 0xd7, 0x3b, 0x02, 0x92, 0x4b, 0x5b, 0xb3, 0x44, 0x18, 0x52, 0x10, 0xad, 0xd9, 0x89, + 0x63, 0x62, 0x5b, 0xa8, 0x43, 0x59, 0x9b, 0xa0, 0xb4, 0x6d, 0xa9, 0x89, 0x54, 0xb4, 0x2d, 0x52, + 0x88, 0xd7, 0xd0, 0x44, 0x4b, 0x5a, 0x8e, 0x58, 0xb2, 0x64, 0x21, 0x75, 0xcf, 0x95, 0x2b, 0x0d, + 0x2d, 0xc5, 0x35, 0x94, 0x97, 0x8e, 0x03, 0x97, 0x1e, 0x38, 0x40, 0x7e, 0xa7, 0x7e, 0xb0, 0x5a, + 0x87, 0xb7, 0x36, 0xa4, 0x20, 0x5a, 0x2e, 0x8d, 0x42, 0xb8, 0x8e, 0xa4, 0x3f, 0x4d, 0xcb, 0x0e, + 0x24, 0xe3, 0xef, 0x64, 0xda, 0x7a, 0x05, 0xa3, 0xae, 0x14, 0xd1, 0x7a, 0x69, 0x1c, 0xc3, 0xbb, + 0x8a, 0x02, 0x2e, 0xb7, 0x1b, 0x94, 0x03, 0xf9, 0xa7, 0x28, 0x37, 0x93, 0x94, 0xd0, 0xf7, 0xb5, + 0x1e, 0x69, 0x88, 0x4b, 0xd4, 0xe3, 0x0d, 0x7d, 0x94, 0xc4, 0xd9, 0x32, 0xa9, 0x65, 0x91, 0x8f, + 0x53, 0xc3, 0xc6, 0x7a, 0x1c, 0x00, 0xab, 0x59, 0x56, 0x62, 0x2c, 0x1d, 0xc3, 0xbb, 0x68, 0x26, + 0xc6, 0x28, 0x4f, 0x92, 0x4f, 0x8a, 0xb4, 0x9c, 0x4e, 0xd2, 0x66, 0xd6, 0xb0, 0x22, 0x4d, 0x84, + 0x93, 0x63, 0x35, 0x81, 0x93, 0xcf, 0xe7, 0x8e, 0xb5, 0x09, 0x7c, 0x60, 0xac, 0x4d, 0xe0, 0xb8, + 0x89, 0xae, 0xc4, 0x98, 0x46, 0x4b, 0x9c, 0x12, 0xd3, 0xa7, 0x41, 0xf0, 0xd2, 0x63, 0x16, 0xf9, + 0xa2, 0x90, 0xb7, 0xd3, 0x91, 0xeb, 0x52, 0xbd, 0xa7, 0xc5, 0x21, 0xfd, 0x12, 0x4d, 0x4d, 0xe3, + 0x27, 0x68, 0xae, 0x67, 0x5e, 0x61, 0x6f, 0x93, 0x79, 0x0e, 0x90, 0x53, 0xd5, 0xe3, 0xfa, 0x90, + 0xb1, 0xe5, 0xd1, 0xf0, 0xe2, 0xad, 0xbe, 0x48, 0xfb, 0x33, 0xf8, 0x29, 0x9a, 0x8f, 0xc9, 0xea, + 0xa4, 0x28, 0xf4, 0x57, 0x85, 0xbe, 0x91, 0x8e, 0xd6, 0x47, 0xa6, 0x87, 0x8d, 0xe9, 0x40, 0x0a, + 0x6f, 0xa1, 0x62, 0x0c, 0x77, 0xec, 0x80, 0x93, 0x6f, 0x8a, 0xba, 0x94, 0x4e, 0xdd, 0xb1, 0x03, + 0x9e, 0xf0, 0x51, 0x18, 0x8c, 0x48, 0x62, 0x34, 0x45, 0xfa, 0x3e, 0x94, 0x24, 0x5a, 0x0f, 0x90, + 0xc2, 0x60, 0xb4, 0xf5, 0x92, 0x24, 0x1c, 0xf9, 0x26, 0x37, 0x6c, 0xeb, 0x45, 0x4d, 0xbf, 0x23, + 0x75, 0x2c, 0x72, 0xa4, 0xc4, 0x68, 0x47, 0xbe, 0xcd, 0x0d, 0x73, 0xa4, 0xa8, 0x4a, 0x71, 0x64, + 0x1c, 0x4e, 0x8e, 0x25, 0x1c, 0xf9, 0xee, 0xdc, 0xb1, 0xfa, 0x1d, 0xa9, 0x63, 0xf8, 0x39, 0x2a, + 0xf5, 0x60, 0xa4, 0x51, 0x7c, 0x60, 0x6d, 0x3b, 0x90, 0xf7, 0xd8, 0x7b, 0xc5, 0xbc, 0x33, 0x84, + 0x29, 0xe4, 0x7b, 0x91, 0x3a, 0xe4, 0x5f, 0xa6, 0xe9, 0x79, 0xdc, 0x46, 0x0b, 0x71, 0x2f, 0x6d, + 0x9d, 0x9e, 0x66, 0x1f, 0x54, 0xb3, 0xbb, 0xe9, 0xcd, 0x94, 0x4b, 0x06, 0xbb, 0x11, 0x3a, 0x44, + 0x50, 0xb9, 0x80, 0xa6, 0x37, 0xda, 0x3e, 0x7f, 0x65, 0x40, 0xe0, 0x7b, 0x6e, 0x00, 0x15, 0x1f, + 0x2d, 0x9c, 0xf3, 0x43, 0x84, 0x31, 0x1a, 0x93, 0xb7, 0x7b, 0x46, 0xde, 0xee, 0xf2, 0x59, 0xdc, + 0xfa, 0xd1, 0xf9, 0xd4, 0xb7, 0x7e, 0xf8, 0x8e, 0x97, 0x50, 0x21, 0xb0, 0xdb, 0xbe, 0x03, 0x26, + 0xf7, 0x8e, 0x40, 0x5d, 0xfa, 0x39, 0x23, 0xaf, 0x62, 0xfb, 0x22, 0xf4, 0x68, 0xee, 0xe4, 0x67, + 0x79, 0xe4, 0xe4, 0xac, 0x9c, 0x39, 0x3d, 0x2b, 0x67, 0x7e, 0x9c, 0x95, 0x33, 0xaf, 0x7f, 0x95, + 0x47, 0x0e, 0x26, 0xe4, 0x5f, 0x8e, 0xb5, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc9, 0xfc, + 0x0e, 0xca, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto new file mode 100644 index 00000000000..25d45d3c4f2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -0,0 
+1,74 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcdserver.proto"; +import "rpc.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; + +message RequestHeader { + uint64 ID = 1; + // username is the username associated with the auth token of the gRPC connection + string username = 2; + // auth_revision is a revision number of auth.authStore. It is not related to mvcc. + uint64 auth_revision = 3; +} + +// An InternalRaftRequest is the union of all requests which can be +// sent via raft. +message InternalRaftRequest { + RequestHeader header = 100; + uint64 ID = 1; + + Request v2 = 2; + + RangeRequest range = 3; + PutRequest put = 4; + DeleteRangeRequest delete_range = 5; + TxnRequest txn = 6; + CompactionRequest compaction = 7; + + LeaseGrantRequest lease_grant = 8; + LeaseRevokeRequest lease_revoke = 9; + + AlarmRequest alarm = 10; + + AuthEnableRequest auth_enable = 1000; + AuthDisableRequest auth_disable = 1011; + + InternalAuthenticateRequest authenticate = 1012; + + AuthUserAddRequest auth_user_add = 1100; + AuthUserDeleteRequest auth_user_delete = 1101; + AuthUserGetRequest auth_user_get = 1102; + AuthUserChangePasswordRequest auth_user_change_password = 1103; + AuthUserGrantRoleRequest auth_user_grant_role = 1104; + AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; + AuthUserListRequest auth_user_list = 1106; + AuthRoleListRequest auth_role_list = 1107; + + AuthRoleAddRequest auth_role_add = 1200; + AuthRoleDeleteRequest auth_role_delete = 1201; + AuthRoleGetRequest auth_role_get = 1202; + AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; + AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; +} + +message EmptyResponse { +} + +// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? +// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. +// To avoid misuse of that field, we keep an internal version of AuthenticateRequest. +message InternalAuthenticateRequest { + string name = 1; + string password = 2; + + // simple_token is generated in API layer (etcdserver/v3_server.go) + string simple_token = 3; +} + diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go new file mode 100644 index 00000000000..97e0c4c49e0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -0,0 +1,18682 @@ +// Code generated by protoc-gen-gogo. +// source: rpc.proto +// DO NOT EDIT! + +package etcdserverpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + authpb "github.com/coreos/etcd/auth/authpb" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used.
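// As an aside (not generated code): the field numbers chosen in
// raft_internal.proto above map directly onto tag-byte costs on the wire. A
// field's tag is the varint of fieldNumber<<3 | wireType, so fields 1..15
// need one tag byte while header (100) and the auth fields (1000..1204) need
// two -- which is why the generated Size methods in raft_internal.pb.go add
// "n += 1 + ..." for ID through Alarm but "n += 2 + ..." for the rest. A
// minimal sketch of that arithmetic, using the same loop shape as
// sovRaftInternal:
func tagSize(fieldNumber uint64) int {
	tag := fieldNumber<<3 | 2 // wire type 2: length-delimited (embedded message)
	n := 0
	for {
		n++
		tag >>= 7
		if tag == 0 {
			return n // tagSize(1) == 1, tagSize(100) == 2, tagSize(1204) == 2
		}
	}
}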
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AlarmType int32 + +const ( + AlarmType_NONE AlarmType = 0 + AlarmType_NOSPACE AlarmType = 1 + AlarmType_CORRUPT AlarmType = 2 +) + +var AlarmType_name = map[int32]string{ + 0: "NONE", + 1: "NOSPACE", + 2: "CORRUPT", +} +var AlarmType_value = map[string]int32{ + "NONE": 0, + "NOSPACE": 1, + "CORRUPT": 2, +} + +func (x AlarmType) String() string { + return proto.EnumName(AlarmType_name, int32(x)) +} +func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +type RangeRequest_SortOrder int32 + +const ( + RangeRequest_NONE RangeRequest_SortOrder = 0 + RangeRequest_ASCEND RangeRequest_SortOrder = 1 + RangeRequest_DESCEND RangeRequest_SortOrder = 2 +) + +var RangeRequest_SortOrder_name = map[int32]string{ + 0: "NONE", + 1: "ASCEND", + 2: "DESCEND", +} +var RangeRequest_SortOrder_value = map[string]int32{ + "NONE": 0, + "ASCEND": 1, + "DESCEND": 2, +} + +func (x RangeRequest_SortOrder) String() string { + return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) +} +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } + +type RangeRequest_SortTarget int32 + +const ( + RangeRequest_KEY RangeRequest_SortTarget = 0 + RangeRequest_VERSION RangeRequest_SortTarget = 1 + RangeRequest_CREATE RangeRequest_SortTarget = 2 + RangeRequest_MOD RangeRequest_SortTarget = 3 + RangeRequest_VALUE RangeRequest_SortTarget = 4 +) + +var RangeRequest_SortTarget_name = map[int32]string{ + 0: "KEY", + 1: "VERSION", + 2: "CREATE", + 3: "MOD", + 4: "VALUE", +} +var RangeRequest_SortTarget_value = map[string]int32{ + "KEY": 0, + "VERSION": 1, + "CREATE": 2, + "MOD": 3, + "VALUE": 4, +} + +func (x RangeRequest_SortTarget) String() string { + return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) +} +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } + +type Compare_CompareResult int32 + +const ( + Compare_EQUAL Compare_CompareResult = 0 + Compare_GREATER Compare_CompareResult = 1 + Compare_LESS Compare_CompareResult = 2 + Compare_NOT_EQUAL Compare_CompareResult = 3 +) + +var Compare_CompareResult_name = map[int32]string{ + 0: "EQUAL", + 1: "GREATER", + 2: "LESS", + 3: "NOT_EQUAL", +} +var Compare_CompareResult_value = map[string]int32{ + "EQUAL": 0, + "GREATER": 1, + "LESS": 2, + "NOT_EQUAL": 3, +} + +func (x Compare_CompareResult) String() string { + return proto.EnumName(Compare_CompareResult_name, int32(x)) +} +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } + +type Compare_CompareTarget int32 + +const ( + Compare_VERSION Compare_CompareTarget = 0 + Compare_CREATE Compare_CompareTarget = 1 + Compare_MOD Compare_CompareTarget = 2 + Compare_VALUE Compare_CompareTarget = 3 + Compare_LEASE Compare_CompareTarget = 4 +) + +var Compare_CompareTarget_name = map[int32]string{ + 0: "VERSION", + 1: "CREATE", + 2: "MOD", + 3: "VALUE", + 4: "LEASE", +} +var Compare_CompareTarget_value = map[string]int32{ + "VERSION": 0, + "CREATE": 1, + "MOD": 2, + "VALUE": 3, + "LEASE": 4, +} + +func (x Compare_CompareTarget) String() string { + return proto.EnumName(Compare_CompareTarget_name, int32(x)) +} +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } + +type WatchCreateRequest_FilterType int32 + +const ( + // filter out put event. + WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 + // filter out delete event. 
+ WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 +) + +var WatchCreateRequest_FilterType_name = map[int32]string{ + 0: "NOPUT", + 1: "NODELETE", +} +var WatchCreateRequest_FilterType_value = map[string]int32{ + "NOPUT": 0, + "NODELETE": 1, +} + +func (x WatchCreateRequest_FilterType) String() string { + return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) +} +func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{21, 0} +} + +type AlarmRequest_AlarmAction int32 + +const ( + AlarmRequest_GET AlarmRequest_AlarmAction = 0 + AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 + AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 +) + +var AlarmRequest_AlarmAction_name = map[int32]string{ + 0: "GET", + 1: "ACTIVATE", + 2: "DEACTIVATE", +} +var AlarmRequest_AlarmAction_value = map[string]int32{ + "GET": 0, + "ACTIVATE": 1, + "DEACTIVATE": 2, +} + +func (x AlarmRequest_AlarmAction) String() string { + return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) +} +func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{48, 0} +} + +type ResponseHeader struct { + // cluster_id is the ID of the cluster which sent the response. + ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // member_id is the ID of the member which sent the response. + MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` + // revision is the key-value store revision when the request was applied. + Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` + // raft_term is the raft term when the request was applied. + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } + +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + +type RangeRequest struct { + // key is the first key for the range. If range_end is not given, the request only looks up key. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the upper bound on the requested range [key, range_end). + // If range_end is '\0', the range is all keys >= key. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // revision is the point-in-time of the key-value store to use for the range. 
+ // If revision is less than or equal to zero, the range is over the newest key-value store. + // If the revision has been compacted, ErrCompacted is returned as a response. + Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` + // sort_order is the order for returned sorted results. + SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` + // sort_target is the key-value field to use for sorting. + SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` + // serializable sets the range request to use serializable member-local reads. + // Range requests are linearizable by default; linearizable requests have higher + // latency and lower throughput than serializable requests but reflect the current + // consensus of the cluster. For better performance, in exchange for possible stale reads, + // a serializable range request is served locally without needing to reach consensus + // with other nodes in the cluster. + Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` + // keys_only when set returns only the keys and not the values. + KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` + // count_only when set returns only the count of the keys in the range. + CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create revisions will be filtered away. + MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away.
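// A client-side sketch (not part of this file) of the range_end rules in the
// comment above: the end of a prefix range is the prefix with its last
// non-0xff byte incremented ("aa"+1 == "ab", "a\xff"+1 == "b"). The helper
// name prefixRangeEnd is illustrative, not a generated identifier.
func prefixRangeEnd(key []byte) []byte {
	end := make([]byte, len(key))
	copy(end, key)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// every byte was 0xff: fall back to '\0', i.e. all keys >= key
	return []byte{0}
}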
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` +} + +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } + +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + +type RangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kvs is the list of key-value pairs matched by the range request. + // kvs is empty when count is requested. + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` + // more indicates if there are more keys to return in the requested range. + More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` + // count is set to the number of keys within the range when requested. + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } + +func (m *RangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { + if m != nil { + return m.Kvs + } + return nil +} + +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type PutRequest struct { + // key is the key, in bytes, to put into the key-value store. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // value is the value, in bytes, to associate with the key in the key-value store. 
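// A hedged pagination sketch for the RangeResponse fields above: with a
// bounded Limit, reissue the request from one byte past the last returned key
// until More is false. rangeOnce and process are hypothetical stand-ins for
// the caller's transport and handling, not functions in this package.
func rangeAll(start, end []byte) error {
	req := &RangeRequest{Key: start, RangeEnd: end, Limit: 500}
	for {
		resp, err := rangeOnce(req) // hypothetical: sends req, returns (*RangeResponse, error)
		if err != nil {
			return err
		}
		for _, kv := range resp.Kvs {
			process(kv) // hypothetical: kv is a *mvccpb.KeyValue
		}
		if !resp.More || len(resp.Kvs) == 0 {
			return nil
		}
		last := resp.Kvs[len(resp.Kvs)-1].Key
		req.Key = append(append([]byte{}, last...), 0) // resume just past the last key
	}
}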
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // lease is the lease ID to associate with the key in the key-value store. A lease + // value of 0 indicates no lease. + Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + +type PutResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } + +func (m *PutResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + +type DeleteRangeRequest struct { + // key is the first key to delete in the range. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the key following the last key to delete for the range [key, range_end). + // If range_end is not given, the range is defined to contain only the key argument. + // If range_end is the given key plus one (e.g., "aa"+1 == "ab"), then the range is all the keys + // with the prefix (the given key). + // If range_end is '\0', the range is all keys greater than or equal to the key argument. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pairs before deleting them. + // The previous key-value pairs will be returned in the delete response.
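// Taken together, the PutRequest flags above make Put a small read-modify
// primitive rather than a blind overwrite: PrevKv echoes back the replaced
// pair, while IgnoreValue and IgnoreLease reuse the key's current value or
// lease and error if the key does not exist. A sketch of an update that
// deliberately keeps the existing lease (key and value are illustrative):
func updateKeepLease() *PutRequest {
	return &PutRequest{
		Key:         []byte("config/flag"),
		Value:       []byte("on"),
		PrevKv:      true, // return the overwritten key-value pair in the response
		IgnoreLease: true, // reuse the key's current lease instead of detaching it
	}
}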
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } + +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type DeleteRangeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // deleted is the number of keys deleted by the delete range request. + Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` +} + +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } + +func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + +type RequestOp struct { + // request is a union of request types accepted by a transaction. 
+ // + // Types that are valid to be assigned to Request: + // *RequestOp_RequestRange + // *RequestOp_RequestPut + // *RequestOp_RequestDeleteRange + // *RequestOp_RequestTxn + Request isRequestOp_Request `protobuf_oneof:"request"` +} + +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } + +type isRequestOp_Request interface { + isRequestOp_Request() + MarshalTo([]byte) (int, error) + Size() int +} + +type RequestOp_RequestRange struct { + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` +} +type RequestOp_RequestPut struct { + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` +} +type RequestOp_RequestDeleteRange struct { + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` +} +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` +} + +func (*RequestOp_RequestRange) isRequestOp_Request() {} +func (*RequestOp_RequestPut) isRequestOp_Request() {} +func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} + +func (m *RequestOp) GetRequest() isRequestOp_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *RequestOp) GetRequestRange() *RangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { + return x.RequestRange + } + return nil +} + +func (m *RequestOp) GetRequestPut() *PutRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { + return x.RequestPut + } + return nil +} + +func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { + return x.RequestDeleteRange + } + return nil +} + +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { + return x.RequestTxn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
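+//
+// Illustrative note (not generated code): callers never invoke the oneof
+// funcs below directly. A RequestOp is populated by assigning one of the
+// wrapper types above to its Request field, for example:
+//
+//     op := &RequestOp{
+//         Request: &RequestOp_RequestPut{
+//             RequestPut: &PutRequest{Key: []byte("k"), Value: []byte("v")},
+//         },
+//     }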
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ + (*RequestOp_RequestRange)(nil), + (*RequestOp_RequestPut)(nil), + (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), + } +} + +func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestRange); err != nil { + return err + } + case *RequestOp_RequestPut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestPut); err != nil { + return err + } + case *RequestOp_RequestDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { + return err + } + case *RequestOp_RequestTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestTxn); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RequestOp.Request has unexpected type %T", x) + } + return nil +} + +func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RequestOp) + switch tag { + case 1: // request.request_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestRange{msg} + return true, err + case 2: // request.request_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestPut{msg} + return true, err + case 3: // request.request_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestDeleteRange{msg} + return true, err + case 4: // request.request_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestTxn{msg} + return true, err + default: + return false, nil + } +} + +func _RequestOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RequestOp) + // request + switch x := m.Request.(type) { + case *RequestOp_RequestRange: + s := proto.Size(x.RequestRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestPut: + s := proto.Size(x.RequestPut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestDeleteRange: + s := proto.Size(x.RequestDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RequestOp_RequestTxn: + s := proto.Size(x.RequestTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResponseOp struct { + // response is a union of response types returned by a transaction. 
+ // + // Types that are valid to be assigned to Response: + // *ResponseOp_ResponseRange + // *ResponseOp_ResponsePut + // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn + Response isResponseOp_Response `protobuf_oneof:"response"` +} + +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } + +type isResponseOp_Response interface { + isResponseOp_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type ResponseOp_ResponseRange struct { + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` +} +type ResponseOp_ResponsePut struct { + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` +} +type ResponseOp_ResponseDeleteRange struct { + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` +} +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` +} + +func (*ResponseOp_ResponseRange) isResponseOp_Response() {} +func (*ResponseOp_ResponsePut) isResponseOp_Response() {} +func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} + +func (m *ResponseOp) GetResponse() isResponseOp_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *ResponseOp) GetResponseRange() *RangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { + return x.ResponseRange + } + return nil +} + +func (m *ResponseOp) GetResponsePut() *PutResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { + return x.ResponsePut + } + return nil +} + +func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { + return x.ResponseDeleteRange + } + return nil +} + +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { + return x.ResponseTxn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
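+//
+// Illustrative note (not generated code): a ResponseOp is usually read back
+// through the typed getters rather than a type switch; `respOp` here is a
+// hypothetical ResponseOp taken from a transaction response:
+//
+//     if put := respOp.GetResponsePut(); put != nil {
+//         prev := put.GetPrevKv() // nil unless prev_kv was requested
+//         _ = prev
+//     }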
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ + (*ResponseOp_ResponseRange)(nil), + (*ResponseOp_ResponsePut)(nil), + (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), + } +} + +func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseRange); err != nil { + return err + } + case *ResponseOp_ResponsePut: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponsePut); err != nil { + return err + } + case *ResponseOp_ResponseDeleteRange: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { + return err + } + case *ResponseOp_ResponseTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseTxn); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) + } + return nil +} + +func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseOp) + switch tag { + case 1: // response.response_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseRange{msg} + return true, err + case 2: // response.response_put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PutResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponsePut{msg} + return true, err + case 3: // response.response_delete_range + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteRangeResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseDeleteRange{msg} + return true, err + case 4: // response.response_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseTxn{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseOp_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseOp) + // response + switch x := m.Response.(type) { + case *ResponseOp_ResponseRange: + s := proto.Size(x.ResponseRange) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponsePut: + s := proto.Size(x.ResponsePut) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseDeleteRange: + s := proto.Size(x.ResponseDeleteRange) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseOp_ResponseTxn: + s := proto.Size(x.ResponseTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Compare struct { + // result is logical comparison operation for this comparison. 
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` + // target is the key-value field to inspect for the comparison. + Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` + // key is the subject key for the comparison operation. + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Types that are valid to be assigned to TargetUnion: + // *Compare_Version + // *Compare_CreateRevision + // *Compare_ModRevision + // *Compare_Value + // *Compare_Lease + TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` +} + +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } + +type isCompare_TargetUnion interface { + isCompare_TargetUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type Compare_Version struct { + Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` +} +type Compare_CreateRevision struct { + CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` +} +type Compare_ModRevision struct { + ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` +} +type Compare_Value struct { + Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` +} +type Compare_Lease struct { + Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"` +} + +func (*Compare_Version) isCompare_TargetUnion() {} +func (*Compare_CreateRevision) isCompare_TargetUnion() {} +func (*Compare_ModRevision) isCompare_TargetUnion() {} +func (*Compare_Value) isCompare_TargetUnion() {} +func (*Compare_Lease) isCompare_TargetUnion() {} + +func (m *Compare) GetTargetUnion() isCompare_TargetUnion { + if m != nil { + return m.TargetUnion + } + return nil +} + +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Compare) GetVersion() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Version); ok { + return x.Version + } + return 0 +} + +func (m *Compare) GetCreateRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { + return x.CreateRevision + } + return 0 +} + +func (m *Compare) GetModRevision() int64 { + if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { + return x.ModRevision + } + return 0 +} + +func (m *Compare) GetValue() []byte { + if x, ok := m.GetTargetUnion().(*Compare_Value); ok { + return x.Value + } + return nil +} + +func (m *Compare) GetLease() int64 { + if x, ok := m.GetTargetUnion().(*Compare_Lease); ok { + return x.Lease + } + return 0 +} + +func (m *Compare) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
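+//
+// Illustrative note (not generated code): like the request and response
+// oneofs, TargetUnion is set by assigning a wrapper type. Assuming the
+// CompareResult/CompareTarget constants defined earlier in this file, a
+// comparison asserting that key "k" has never been created (create revision
+// 0) could look like:
+//
+//     cmp := &Compare{
+//         Result:      Compare_EQUAL,
+//         Target:      Compare_CREATE,
+//         Key:         []byte("k"),
+//         TargetUnion: &Compare_CreateRevision{CreateRevision: 0},
+//     }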
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ + (*Compare_Version)(nil), + (*Compare_CreateRevision)(nil), + (*Compare_ModRevision)(nil), + (*Compare_Value)(nil), + (*Compare_Lease)(nil), + } +} + +func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.ModRevision)) + case *Compare_Value: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeRawBytes(x.Value) + case *Compare_Lease: + _ = b.EncodeVarint(8<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Lease)) + case nil: + default: + return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) + } + return nil +} + +func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Compare) + switch tag { + case 4: // target_union.version + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Version{int64(x)} + return true, err + case 5: // target_union.create_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_CreateRevision{int64(x)} + return true, err + case 6: // target_union.mod_revision + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_ModRevision{int64(x)} + return true, err + case 7: // target_union.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.TargetUnion = &Compare_Value{x} + return true, err + case 8: // target_union.lease + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.TargetUnion = &Compare_Lease{int64(x)} + return true, err + default: + return false, nil + } +} + +func _Compare_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Compare) + // target_union + switch x := m.TargetUnion.(type) { + case *Compare_Version: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Version)) + case *Compare_CreateRevision: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CreateRevision)) + case *Compare_ModRevision: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.ModRevision)) + case *Compare_Value: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case *Compare_Lease: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Lease)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// From google paxosdb paper: +// Our implementation hinges around a powerful primitive which we call MultiOp. 
All other database +// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically +// and consists of three components: +// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check +// for the absence or presence of a value, or compare with a given value. Two different tests in the guard +// may apply to the same or different entries in the database. All tests in the guard are applied and +// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise +// it executes f op (see item 3 below). +// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or +// lookup operation, and applies to a single database entry. Two different operations in the list may apply +// to the same or different entries in the database. These operations are executed +// if guard evaluates to +// true. +// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. +type TxnRequest struct { + // compare is a list of predicates representing a conjunction of terms. + // If the comparisons succeed, then the success requests will be processed in order, + // and the response will contain their respective responses in order. + // If the comparisons fail, then the failure requests will be processed in order, + // and the response will contain their respective responses in order. + Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` + // success is a list of requests which will be applied when compare evaluates to true. + Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` + // failure is a list of requests which will be applied when compare evaluates to false. + Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` +} + +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } + +func (m *TxnRequest) GetCompare() []*Compare { + if m != nil { + return m.Compare + } + return nil +} + +func (m *TxnRequest) GetSuccess() []*RequestOp { + if m != nil { + return m.Success + } + return nil +} + +func (m *TxnRequest) GetFailure() []*RequestOp { + if m != nil { + return m.Failure + } + return nil +} + +type TxnResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // succeeded is set to true if the compare evaluated to true or false otherwise. + Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` + // responses is a list of responses corresponding to the results from applying + // success if succeeded is true or failure if succeeded is false. 
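+ //
+ // Illustrative sketch (not generated code): a compare-and-swap built from
+ // the types above, where `cmp` is a Compare as sketched earlier and
+ // `putOp`/`rangeOp` are hypothetical RequestOp values. If the comparison
+ // holds, Responses carries one ResponseOp per success request, in order;
+ // otherwise it carries the failure responses.
+ //
+ //     txn := &TxnRequest{
+ //         Compare: []*Compare{cmp},
+ //         Success: []*RequestOp{putOp},
+ //         Failure: []*RequestOp{rangeOp},
+ //     }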
+ Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` +} + +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } + +func (m *TxnResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + +func (m *TxnResponse) GetResponses() []*ResponseOp { + if m != nil { + return m.Responses + } + return nil +} + +// CompactionRequest compacts the key-value store up to a given revision. All superseded keys +// with a revision less than the compaction revision will be removed. +type CompactionRequest struct { + // revision is the key-value store revision for the compaction operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` + // physical is set so the RPC will wait until the compaction is physically + // applied to the local database such that compacted entries are totally + // removed from the backend database. + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` +} + +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } + +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + +type CompactionResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } + +func (m *CompactionResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type HashRequest struct { +} + +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } + +type HashKVRequest struct { + // revision is the key-value store revision for the hash operation. + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } + +func (m *HashKVRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +type HashKVResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's MVCC keys up to a given revision. 
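+ // Unlike HashResponse.hash, which covers the member's entire backend, this
+ // hash covers only the MVCC key space up to the given revision, so members
+ // reporting the same compact_revision can be compared against one another.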
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` + // compact_revision is the compacted revision of key-value store when hash begins. + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` +} + +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } + +func (m *HashKVResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashKVResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +func (m *HashKVResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +type HashResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // hash is the hash value computed from the responding member's KV's backend. + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } + +func (m *HashResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + +type SnapshotRequest struct { +} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } + +type SnapshotResponse struct { + // header has the current key-value store information. The first header in the snapshot + // stream indicates the point in time of the snapshot. + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // remaining_bytes is the number of blob bytes to be sent after this message + RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` + // blob contains the next chunk of the snapshot in the snapshot stream. + Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } + +func (m *SnapshotResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + +type WatchRequest struct { + // request_union is a request to either create a new watcher or cancel an existing watcher. 
+ // + // Types that are valid to be assigned to RequestUnion: + // *WatchRequest_CreateRequest + // *WatchRequest_CancelRequest + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } + +type isWatchRequest_RequestUnion interface { + isWatchRequest_RequestUnion() + MarshalTo([]byte) (int, error) + Size() int +} + +type WatchRequest_CreateRequest struct { + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` +} +type WatchRequest_CancelRequest struct { + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` +} + +func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} +func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} + +func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { + if m != nil { + return m.RequestUnion + } + return nil +} + +func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { + return x.CreateRequest + } + return nil +} + +func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { + if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { + return x.CancelRequest + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ + (*WatchRequest_CreateRequest)(nil), + (*WatchRequest_CancelRequest)(nil), + } +} + +func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CreateRequest); err != nil { + return err + } + case *WatchRequest_CancelRequest: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CancelRequest); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) + } + return nil +} + +func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*WatchRequest) + switch tag { + case 1: // request_union.create_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCreateRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CreateRequest{msg} + return true, err + case 2: // request_union.cancel_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(WatchCancelRequest) + err := b.DecodeMessage(msg) + m.RequestUnion = &WatchRequest_CancelRequest{msg} + return true, err + default: + return false, nil + } +} + +func _WatchRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*WatchRequest) + // request_union + switch x := m.RequestUnion.(type) { + case *WatchRequest_CreateRequest: + s := proto.Size(x.CreateRequest) + n += 
proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WatchRequest_CancelRequest: + s := proto.Size(x.CancelRequest) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchCreateRequest struct { + // key is the key to register for watching. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, + // only the key argument is watched. If range_end is equal to '\0', all keys greater than + // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. + RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". + StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` + // progress_notify is set so that the etcd server will periodically send a WatchResponse with + // no events to the new watcher if there are no recent events. It is useful when clients + // wish to recover a disconnected watcher starting from a recent known revision. + // The etcd server may decide how often it will send notifications based on current load. + ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` + // filters filter the events at server side before it sends back to the watcher. + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` +} + +func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } +func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCreateRequest) ProtoMessage() {} +func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } + +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +type WatchCancelRequest struct { + // watch_id is the watcher id to cancel so that no more events are transmitted. 
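+ //
+ // Illustrative sketch (not generated code): the watch_id to cancel is the
+ // one reported in the WatchResponse that created the watcher. A prefix
+ // watch and its later cancellation could be sent on the same stream as:
+ //
+ //     create := &WatchRequest{RequestUnion: &WatchRequest_CreateRequest{
+ //         CreateRequest: &WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte("fop")},
+ //     }}
+ //     cancel := &WatchRequest{RequestUnion: &WatchRequest_CancelRequest{
+ //         CancelRequest: &WatchCancelRequest{WatchId: id}, // id from the create response
+ //     }}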
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` +} + +func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } +func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCancelRequest) ProtoMessage() {} +func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } + +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +type WatchResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // watch_id is the ID of the watcher that corresponds to the response. + WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` + // created is set to true if the response is for a create watch request. + // The client should record the watch_id and expect to receive events for + // the created watcher from the same stream. + // All events sent to the created watcher will attach with the same watch_id. + Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` + // canceled is set to true if the response is for a cancel watch request. + // No further events will be sent to the canceled watcher. + Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` + // compact_revision is set to the minimum index if a watcher tries to watch + // at a compacted index. + // + // This happens when creating a watcher at a compacted revision or the watcher cannot + // catch up with the progress of the key-value store. + // + // The client should treat the watcher as canceled and should not try to create any + // watcher with the same start_revision again. + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` +} + +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } + +func (m *WatchResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + +func (m *WatchResponse) GetEvents() []*mvccpb.Event { + if m != nil { + return m.Events + } + return nil +} + +type LeaseGrantRequest struct { + // TTL is the advisory time-to-live in seconds. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. 
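+ //
+ // Illustrative sketch (not generated code): granting a 60-second lease and
+ // letting the lessor choose the ID. The granted ID is what a PutRequest's
+ // lease field refers to when attaching a key to the lease:
+ //
+ //     grant := &LeaseGrantRequest{TTL: 60, ID: 0}
+ //     // after the grant response: &PutRequest{Key: k, Value: v, Lease: resp.ID}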
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } + +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseGrantResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID for the granted lease. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the server chosen lease time-to-live in seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } + +func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type LeaseRevokeRequest struct { + // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type LeaseKeepAliveRequest struct { + // ID is the lease ID for the lease to keep alive. 
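+ // Keep-alives are exchanged on a bidirectional stream: the client sends a
+ // LeaseKeepAliveRequest whenever it wants to refresh the lease and reads
+ // the renewed TTL from the corresponding LeaseKeepAliveResponse.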
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseKeepAliveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the new time-to-live for the lease. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } + +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. + Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } + +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. 
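+ // Keys is populated only when the request set keys to true; otherwise it
+ // is left empty even if keys are attached to the lease.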
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + +type LeaseLeasesRequest struct { +} + +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } + +type LeaseStatus struct { + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } + +func (m *LeaseStatus) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +type LeaseLeasesResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` +} + +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } + +func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus { + if m != nil { + return m.Leases + } + return nil +} + +type Member struct { + // ID is the member ID for this member. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // name is the human-readable name of the member. If the member is not started, the name will be an empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // peerURLs is the list of URLs the member exposes to the cluster for communication. + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` + // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 
+ ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` +} + +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } + +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` +} + +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } + +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberAddResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // member is the member information for the added member. + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } + +func (m *MemberAddResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberAddResponse) GetMember() *Member { + if m != nil { + return m.Member + } + return nil +} + +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberRemoveRequest struct { + // ID is the member ID of the member to remove. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } + +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +type MemberRemoveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } + +func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberUpdateRequest struct { + // ID is the member ID of the member to update. + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // peerURLs is the new list of URLs the member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` +} + +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } + +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +type MemberUpdateResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } + +func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type MemberListRequest struct { +} + +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } + +type MemberListResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members associated with the cluster. 
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` +} + +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } + +func (m *MemberListResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *MemberListResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + +type DefragmentRequest struct { +} + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } + +func (m *DefragmentResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. + TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type AlarmRequest struct { + // action is the kind of alarm request to issue. The action + // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a + // raised alarm. + Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` + // memberID is the ID of the member associated with the alarm. If memberID is 0, the + // alarm request covers all members. + MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm to consider for this request. 
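+ //
+ // Illustrative sketch (not generated code): clearing a raised no-space
+ // alarm on every member after reclaiming space, assuming the action and
+ // AlarmType constants defined earlier in this file:
+ //
+ //     req := &AlarmRequest{
+ //         Action:   AlarmRequest_DEACTIVATE,
+ //         MemberID: 0, // 0 covers all members
+ //         Alarm:    AlarmType_NOSPACE,
+ //     }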
+ Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmMember struct { + // memberID is the ID of the member associated with the raised alarm. + MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` + // alarm is the type of alarm which has been raised. + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` +} + +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + +type AlarmResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // alarms is a list of alarms associated with the alarm request. + Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` +} + +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +func (m *AlarmResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AlarmResponse) GetAlarms() []*AlarmMember { + if m != nil { + return m.Alarms + } + return nil +} + +type StatusRequest struct { +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } + +type StatusResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // version is the cluster protocol version used by the responding member. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // dbSize is the size of the backend database, in bytes, of the responding member. + DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` + // leader is the member ID which the responding member believes is the current leader. + Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` + // raftIndex is the current raft index of the responding member. + RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` + // raftTerm is the current raft term of the responding member. 
+
+type StatusRequest struct {
+}
+
+func (m *StatusRequest) Reset() { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage() {}
+func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} }
+
+type StatusResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// version is the cluster protocol version used by the responding member.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// dbSize is the size of the backend database, in bytes, of the responding member.
+	DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+	// leader is the member ID which the responding member believes is the current leader.
+	Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+	// raftIndex is the current raft index of the responding member.
+	RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+	// raftTerm is the current raft term of the responding member.
+	RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+}
+
+func (m *StatusResponse) Reset() { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage() {}
+func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} }
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+	if m != nil {
+		return m.DbSize
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+	if m != nil {
+		return m.Leader
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+	if m != nil {
+		return m.RaftIndex
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
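+
+// Illustrative sketch (not part of the generated code): reading a member's
+// status. The response reflects the responding member's own view, so Leader
+// and RaftIndex can differ across members. Assumes an established
+// *grpc.ClientConn.
+//
+//	func statusExample(ctx context.Context, conn *grpc.ClientConn) error {
+//		mc := NewMaintenanceClient(conn)
+//		resp, err := mc.Status(ctx, &StatusRequest{})
+//		if err != nil {
+//			return err
+//		}
+//		_ = resp.Version  // cluster protocol version
+//		_ = resp.DbSize   // backend database size in bytes
+//		_ = resp.Leader   // leader as seen by this member
+//		_ = resp.RaftTerm // current raft term
+//		return nil
+//	}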
+
+type AuthEnableRequest struct {
+}
+
+func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage() {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} }
+
+type AuthDisableRequest struct {
+}
+
+func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage() {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} }
+
+type AuthenticateRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage() {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} }
+
+func (m *AuthenticateRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserAddRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage() {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} }
+
+func (m *AuthUserAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserGetRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage() {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} }
+
+func (m *AuthUserGetRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserDeleteRequest struct {
+	// name is the name of the user to delete.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage() {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} }
+
+func (m *AuthUserDeleteRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+	// name is the name of the user whose password is being changed.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// password is the new password for the user.
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage() {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptorRpc, []int{59}
+}
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserGrantRoleRequest struct {
+	// user is the name of the user which should be granted a given role.
+	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+	// role is the name of the role to grant to the user.
+	Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage() {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} }
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+	if m != nil {
+		return m.User
+	}
+	return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage() {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} }
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
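+
+// Illustrative sketch (not part of the generated code): the password login
+// flow. Authenticate exchanges a name/password pair for a token that
+// succeeding RPCs attach as request metadata. Assumes an established
+// *grpc.ClientConn and the Auth service client generated for these messages
+// (NewAuthClient, further down in this file).
+//
+//	func loginExample(ctx context.Context, conn *grpc.ClientConn, user, pass string) (string, error) {
+//		ac := NewAuthClient(conn)
+//		resp, err := ac.Authenticate(ctx, &AuthenticateRequest{Name: user, Password: pass})
+//		if err != nil {
+//			return "", err
+//		}
+//		return resp.Token, nil // attach this token to succeeding RPCs
+//	}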
+
+type AuthRoleAddRequest struct {
+	// name is the name of the role to add to the authentication system.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage() {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} }
+
+func (m *AuthRoleAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthRoleGetRequest struct {
+	Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage() {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} }
+
+func (m *AuthRoleGetRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserListRequest struct {
+}
+
+func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage() {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} }
+
+type AuthRoleListRequest struct {
+}
+
+func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage() {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} }
+
+type AuthRoleDeleteRequest struct {
+	Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage() {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} }
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+	// name is the name of the role which will be granted the permission.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// perm is the permission to grant to the role.
+	Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage() {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptorRpc, []int{67}
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionRequest struct {
+	Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage() {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptorRpc, []int{68}
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return ""
+}
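+
+// Illustrative sketch (not part of the generated code): granting a role read
+// permission over a half-open key range [key, rangeEnd). The authpb.Permission
+// type is the one imported by this file; the READ permission kind and the
+// Auth service client (NewAuthClient) are assumed from the accompanying
+// generated code. Assumes an established *grpc.ClientConn.
+//
+//	func grantReadExample(ctx context.Context, conn *grpc.ClientConn) error {
+//		ac := NewAuthClient(conn)
+//		_, err := ac.RoleGrantPermission(ctx, &AuthRoleGrantPermissionRequest{
+//			Name: "reader",
+//			Perm: &authpb.Permission{
+//				PermType: authpb.READ,
+//				Key:      []byte("foo"),
+//				RangeEnd: []byte("foo0"), // covers keys prefixed "foo"
+//			},
+//		})
+//		return err
+//	}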
+
+type AuthEnableResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage() {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} }
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthDisableResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage() {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} }
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthenticateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// token is an authorized token that can be used in succeeding RPCs
+	Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+}
+
+func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage() {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} }
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
+type AuthUserAddResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage() {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} }
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGetResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage() {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} }
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserDeleteResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage() {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} }
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage() {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptorRpc, []int{75}
+}
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage() {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} }
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage() {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} }
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleAddResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage() {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} }
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGetResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"`
+}
+
+func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage() {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} }
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleListResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage() {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} }
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserListResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"`
+}
+
+func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage() {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{81} }
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+	if m != nil {
+		return m.Users
+	}
+	return nil
+}
+
+type AuthRoleDeleteResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage() {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} }
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage() {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptorRpc, []int{83}
+}
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage() {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptorRpc, []int{84}
+}
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+	proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+	proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+	proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+	proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+	proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+	proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+	proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+	proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+	proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+	proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+	proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+	proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+	proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+	proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+	proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+	proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+	proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+	proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+	proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+	proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+	proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+	proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+	proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+	proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+	proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+	proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+	proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+	proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+	proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+	proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+	proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+	proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+	proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+	proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+	proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+	proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+	proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+	proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+	proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+	proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+	proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+	proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+	proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+	proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+	proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+	proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+	proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+	proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+	proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+	proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+	proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+	proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+	proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+	proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+	proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+	proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+	proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+	proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+	proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+	proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+	proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+	proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+	proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+	proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+	proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+	proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+	proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+	proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+	proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+	proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+	proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+	proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+	proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+	proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+	proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+	proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+	proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+	proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+	proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+	proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+	proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+	proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+	proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+	proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+	proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+	proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+	proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for KV service
+
+type KVClient interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
+
+type kVClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+	return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+	out := new(RangeResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+	out := new(PutResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+	out := new(DeleteRangeResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+	out := new(TxnResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+	out := new(CompactionResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
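+
+// Illustrative sketch (not part of the generated code): basic KV usage over a
+// raw gRPC connection. The endpoint "127.0.0.1:2379" is a hypothetical local
+// etcd; production code would normally go through the clientv3 wrapper rather
+// than these stubs directly.
+//
+//	func kvExample(ctx context.Context) error {
+//		conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
+//		if err != nil {
+//			return err
+//		}
+//		defer conn.Close()
+//		kv := NewKVClient(conn)
+//		// Put increments the store revision and records one event.
+//		if _, err = kv.Put(ctx, &PutRequest{Key: []byte("k"), Value: []byte("v")}); err != nil {
+//			return err
+//		}
+//		resp, err := kv.Range(ctx, &RangeRequest{Key: []byte("k")})
+//		if err != nil {
+//			return err
+//		}
+//		_ = resp.Kvs // matching key-value pairs
+//		return nil
+//	}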
+
+// Server API for KV service
+
+type KVServer interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(context.Context, *RangeRequest) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(context.Context, *PutRequest) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+	s.RegisterService(&_KV_serviceDesc, srv)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Range(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.KV/Range",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PutRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Put(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.KV/Put",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Put(ctx, req.(*PutRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteRangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).DeleteRange(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.KV/DeleteRange",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TxnRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Txn(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.KV/Txn",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CompactionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Compact(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.KV/Compact",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
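+
+// Illustrative sketch (not part of the generated code): wiring a KVServer
+// implementation into a gRPC server. stubKV is a hypothetical stand-in; only
+// Range is shown, and Put, DeleteRange, Txn and Compact would be implemented
+// the same way. etcd's real implementation lives in the etcdserver package.
+//
+//	type stubKV struct{}
+//
+//	func (s *stubKV) Range(ctx context.Context, r *RangeRequest) (*RangeResponse, error) {
+//		return &RangeResponse{}, nil // no keys in this stub
+//	}
+//
+//	func serveExample(s *grpc.Server, kv KVServer) {
+//		RegisterKVServer(s, kv) // binds the _KV_serviceDesc handlers to kv
+//	}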
MethodName: "Compact", + Handler: _KV_Compact_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +// Client API for Watch service + +type WatchClient interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + return x, nil +} + +type Watch_WatchClient interface { + Send(*WatchRequest) error + Recv() (*WatchResponse, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Send(m *WatchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *watchWatchClient) Recv() (*WatchResponse, error) { + m := new(WatchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + Watch(Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WatchServer).Watch(&watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchResponse) error + Recv() (*WatchRequest, error) + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *watchWatchServer) Recv() (*WatchRequest, error) { + m := new(WatchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Lease service + +type LeaseClient interface { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. 
+
+// Client API for Lease service
+
+type LeaseClient interface {
+	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+	// within a given time to live period. All keys attached to the lease will be expired and
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+	return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+	out := new(LeaseGrantResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+	out := new(LeaseRevokeResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &leaseLeaseKeepAliveClient{stream}
+	return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+	Send(*LeaseKeepAliveRequest) error
+	Recv() (*LeaseKeepAliveResponse, error)
+	grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+	grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+	m := new(LeaseKeepAliveResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+	out := new(LeaseTimeToLiveResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+	out := new(LeaseLeasesResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
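+
+// Illustrative sketch (not part of the generated code): granting a lease and
+// sending one keep-alive over the bidirectional stream. The TTL and ID fields
+// are assumed from the lease messages generated earlier in this file; a real
+// client resends keep-alives periodically rather than once. Assumes an
+// established *grpc.ClientConn.
+//
+//	func leaseExample(ctx context.Context, conn *grpc.ClientConn) error {
+//		lc := NewLeaseClient(conn)
+//		grant, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{TTL: 10}) // seconds
+//		if err != nil {
+//			return err
+//		}
+//		stream, err := lc.LeaseKeepAlive(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		if err := stream.Send(&LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
+//			return err
+//		}
+//		resp, err := stream.Recv()
+//		if err != nil {
+//			return err
+//		}
+//		_ = resp.TTL // remaining time-to-live granted by the server
+//		return nil
+//	}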
+
+// Server API for Lease service
+
+type LeaseServer interface {
+	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+	// within a given time to live period. All keys attached to the lease will be expired and
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+	s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseGrantRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseGrant(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseRevokeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseRevoke(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+	Send(*LeaseKeepAliveResponse) error
+	Recv() (*LeaseKeepAliveRequest, error)
+	grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+	grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+	m := new(LeaseKeepAliveRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseTimeToLiveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseLeasesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseLeases(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Lease",
+	HandlerType: (*LeaseServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "LeaseGrant",
+			Handler: _Lease_LeaseGrant_Handler,
+		},
+		{
+			MethodName: "LeaseRevoke",
+			Handler: _Lease_LeaseRevoke_Handler,
+		},
+		{
+			MethodName: "LeaseTimeToLive",
+			Handler: _Lease_LeaseTimeToLive_Handler,
+		},
+		{
+			MethodName: "LeaseLeases",
+			Handler: _Lease_LeaseLeases_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName: "LeaseKeepAlive",
+			Handler: _Lease_LeaseKeepAlive_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
+
+// Client API for Cluster service
+
+type ClusterClient interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+}
+
+type clusterClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+	return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+	out := new(MemberAddResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+	out := new(MemberRemoveResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+	out := new(MemberUpdateResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+	out := new(MemberListResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
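+
+// Illustrative sketch (not part of the generated code): listing the cluster
+// membership. Assumes an established *grpc.ClientConn.
+//
+//	func membersExample(ctx context.Context, conn *grpc.ClientConn) ([]*Member, error) {
+//		cc := NewClusterClient(conn)
+//		resp, err := cc.MemberList(ctx, &MemberListRequest{})
+//		if err != nil {
+//			return nil, err
+//		}
+//		return resp.Members, nil // one entry per cluster member
+//	}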
+
+// Server API for Cluster service
+
+type ClusterServer interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+	s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberRemoveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberRemove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberUpdateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server: srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Cluster",
+	HandlerType: (*ClusterServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "MemberAdd",
+			Handler: _Cluster_MemberAdd_Handler,
+		},
+		{
+			MethodName: "MemberRemove",
+			Handler: _Cluster_MemberRemove_Handler,
+		},
+		{
+			MethodName: "MemberUpdate",
+			Handler: _Cluster_MemberUpdate_Handler,
+		},
+		{
+			MethodName: "MemberList",
+			Handler: _Cluster_MemberList_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
+
+// Client API for Maintenance service
+
+type MaintenanceClient interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+	// Hash computes the hash of the KV's backend.
+	// This is designed for testing; do not use this in production when there
+	// are ongoing transactions.
+	Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+	// MoveLeader requests current leader node to transfer its leadership to transferee.
+	MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+}
+
+type maintenanceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+	return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+	out := new(AlarmResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+	out := new(StatusResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+	out := new(DefragmentResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+	out := new(HashResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+	out := new(HashKVResponse)
+	err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) + if err != nil { + return nil, err + } + x := &maintenanceSnapshotClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Maintenance_SnapshotClient interface { + Recv() (*SnapshotResponse, error) + grpc.ClientStream +} + +type maintenanceSnapshotClient struct { + grpc.ClientStream +} + +func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { + m := new(SnapshotResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Maintenance service + +type MaintenanceServer interface { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) + // Status gets the status of the member. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // Defragment defragments a member's backend database to recover storage space. + Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) + // Hash computes the hash of the KV's backend. + // This is designed for testing; do not use this in production when there + // are ongoing transactions. + Hash(context.Context, *HashRequest) (*HashResponse, error) + // HashKV computes the hash of all MVCC keys up to a given revision. + HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) + // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. + Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + // MoveLeader requests current leader node to transfer its leadership to transferee. 
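
Snapshot is the only streaming RPC in the Maintenance service: maintenanceSnapshotClient above wraps grpc.ClientStream with a typed Recv, so consuming the stream is a plain receive loop. A sketch of draining a snapshot to an io.Writer (saveSnapshot is a hypothetical helper; pb is the import alias from the sketch above):

    func saveSnapshot(ctx context.Context, mc pb.MaintenanceClient, w io.Writer) error {
    	stream, err := mc.Snapshot(ctx, &pb.SnapshotRequest{})
    	if err != nil {
    		return err
    	}
    	for {
    		resp, err := stream.Recv()
    		if err == io.EOF {
    			// Server closed the stream: snapshot fully delivered.
    			return nil
    		}
    		if err != nil {
    			return err
    		}
    		// Each SnapshotResponse carries the next chunk in Blob.
    		if _, err := w.Write(resp.Blob); err != nil {
    			return err
    		}
    	}
    }
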
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) +} + +func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { + s.RegisterService(&_Maintenance_serviceDesc, srv) +} + +func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AlarmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Alarm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Alarm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DefragmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Defragment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Defragment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).Hash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/Hash", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HashKVRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).HashKV(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/HashKV", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SnapshotRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return 
srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) +} + +type Maintenance_SnapshotServer interface { + Send(*SnapshotResponse) error + grpc.ServerStream +} + +type maintenanceSnapshotServer struct { + grpc.ServerStream +} + +func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Maintenance_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Maintenance", + HandlerType: (*MaintenanceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Alarm", + Handler: _Maintenance_Alarm_Handler, + }, + { + MethodName: "Status", + Handler: _Maintenance_Status_Handler, + }, + { + MethodName: "Defragment", + Handler: _Maintenance_Defragment_Handler, + }, + { + MethodName: "Hash", + Handler: _Maintenance_Hash_Handler, + }, + { + MethodName: "HashKV", + Handler: _Maintenance_HashKV_Handler, + }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Snapshot", + Handler: _Maintenance_Snapshot_Handler, + ServerStreams: true, + }, + }, + Metadata: "rpc.proto", +} + +// Client API for Auth service + +type AuthClient interface { + // AuthEnable enables authentication. + AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. + Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) + // UserAdd adds a new user. + UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) + // UserGet gets detailed user information. + UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) + // UserList gets a list of all users. + UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) + // UserDelete deletes a specified user. + UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) + // UserChangePassword changes the password of a specified user. + UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) + // UserGrant grants a role to a specified user. + UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) + // UserRevokeRole revokes a role of specified user. + UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) + // RoleAdd adds a new role. 
+ RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) + // RoleGet gets detailed role information. + RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) + // RoleList gets lists of all roles. + RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) + // RoleDelete deletes a specified role. + RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) + // RoleGrantPermission grants a permission of a specified key or range to a specified role. + RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) + // RoleRevokePermission revokes a key or range permission of a specified role. + RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { + out := new(AuthEnableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { + out := new(AuthDisableResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { + out := new(AuthenticateResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { + out := new(AuthUserAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { + out := new(AuthUserGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { + out := new(AuthUserListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { + out := new(AuthUserDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { + out := new(AuthUserChangePasswordResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { + out := new(AuthUserGrantRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { + out := new(AuthUserRevokeRoleResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { + out := new(AuthRoleAddResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { + out := new(AuthRoleGetResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { + out := new(AuthRoleListResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { + out := new(AuthRoleDeleteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { + out := new(AuthRoleGrantPermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { + out := new(AuthRoleRevokePermissionResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Auth service + +type AuthServer interface { + // AuthEnable enables authentication. + AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) + // AuthDisable disables authentication. + AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) + // Authenticate processes an authenticate request. 
+ Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+ // UserAdd adds a new user.
+ UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+ // UserGrantRole grants a role to a specified user.
+ UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+ // UserRevokeRole revokes a role from a specified user.
+ UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role.
+ RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+ // RoleList lists all roles.
+ RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
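
Every unary handler in this file has the same shape: decode the request, call the typed method directly when no interceptor is installed, otherwise wrap the typed call in a grpc.UnaryHandler and delegate with a grpc.UnaryServerInfo naming the full method. That uniformity is what lets one server-wide interceptor observe every RPC. A hedged sketch (the logging interceptor and the authSrv value are hypothetical; the grpc types are the real ones used above):

    func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
    	handler grpc.UnaryHandler) (interface{}, error) {
    	// info.FullMethod is e.g. "/etcdserverpb.Auth/UserAdd", as populated
    	// by the generated handlers above.
    	log.Printf("rpc=%s", info.FullMethod)
    	return handler(ctx, req)
    }

    // Wiring it up:
    //   s := grpc.NewServer(grpc.UnaryInterceptor(logging))
    //   pb.RegisterAuthServer(s, authSrv)
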
+ RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthEnableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthEnable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthEnable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthDisableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).AuthDisable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/AuthDisable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthenticateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Authenticate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/Authenticate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserListRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserChangePasswordRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserChangePassword(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserChangePassword", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserGrantRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserGrantRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserGrantRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthUserRevokeRoleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).UserRevokeRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/UserRevokeRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleAddRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleAdd(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleAdd", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleGrantPermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleGrantPermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthRoleRevokePermissionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).RoleRevokePermission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "etcdserverpb.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AuthEnable", + Handler: 
_Auth_AuthEnable_Handler, + }, + { + MethodName: "AuthDisable", + Handler: _Auth_AuthDisable_Handler, + }, + { + MethodName: "Authenticate", + Handler: _Auth_Authenticate_Handler, + }, + { + MethodName: "UserAdd", + Handler: _Auth_UserAdd_Handler, + }, + { + MethodName: "UserGet", + Handler: _Auth_UserGet_Handler, + }, + { + MethodName: "UserList", + Handler: _Auth_UserList_Handler, + }, + { + MethodName: "UserDelete", + Handler: _Auth_UserDelete_Handler, + }, + { + MethodName: "UserChangePassword", + Handler: _Auth_UserChangePassword_Handler, + }, + { + MethodName: "UserGrantRole", + Handler: _Auth_UserGrantRole_Handler, + }, + { + MethodName: "UserRevokeRole", + Handler: _Auth_UserRevokeRole_Handler, + }, + { + MethodName: "RoleAdd", + Handler: _Auth_RoleAdd_Handler, + }, + { + MethodName: "RoleGet", + Handler: _Auth_RoleGet_Handler, + }, + { + MethodName: "RoleList", + Handler: _Auth_RoleList_Handler, + }, + { + MethodName: "RoleDelete", + Handler: _Auth_RoleDelete_Handler, + }, + { + MethodName: "RoleGrantPermission", + Handler: _Auth_RoleGrantPermission_Handler, + }, + { + MethodName: "RoleRevokePermission", + Handler: _Auth_RoleRevokePermission_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rpc.proto", +} + +func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) + } + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) + } + if m.Revision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + return i, nil +} + +func (m *RangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) + } + if m.Revision != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.SortOrder != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + } + if m.Serializable { + dAtA[i] = 0x38 + i++ + if m.Serializable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeysOnly { + dAtA[i] = 0x40 + i++ + if m.KeysOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CountOnly { + dAtA[i] = 0x48 + i++ + if m.CountOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.MinModRevision != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) + } + if 
m.MinCreateRevision != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) + } + return i, nil +} + +func (m *RangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.More { + dAtA[i] = 0x18 + i++ + if m.More { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Count != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) + } + return i, nil +} + +func (m *PutRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PrevKv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.PrevKv { + 
dAtA[i] = 0x18 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n4, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Deleted != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RequestOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + nn5, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) + n6, err := m.RequestRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestPut != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) + n7, err := m.RequestPut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) + n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ResponseOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Response != nil { + nn10, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn10 + } + return i, nil +} + +func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseRange != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponsePut != 
nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseDeleteRange != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *Compare) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Compare) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Result != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) + } + if m.Target != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Target)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.TargetUnion != nil { + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn15 + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + return i, nil +} +func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + return i, nil +} +func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + return i, nil +} +func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Value != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} +func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x40 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + return i, nil +} +func (m *TxnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Compare) > 0 { + for _, msg := range m.Compare { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Success) > 0 { + for _, msg := range m.Success { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Failure) > 0 { + for _, msg := range m.Failure { + dAtA[i] = 0x1a + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TxnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.Succeeded { + dAtA[i] = 0x10 + i++ + if m.Succeeded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + if m.Physical { + dAtA[i] = 0x10 + i++ + if m.Physical { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n17, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *HashRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) + } + return i, nil +} + +func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n18, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + if m.CompactRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, 
i, uint64(m.CompactRevision)) + } + return i, nil +} + +func (m *HashResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n19, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.Hash != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) + } + return i, nil +} + +func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n20, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.RemainingBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) + } + if len(m.Blob) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i += copy(dAtA[i:], m.Blob) + } + return i, nil +} + +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequestUnion != nil { + nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn21 + } + return i, nil +} + +func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CreateRequest != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) + n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} +func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CancelRequest != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) + n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} +func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + if m.StartRevision != 0 { + dAtA[i] = 0x18 + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) + } + if m.ProgressNotify { + dAtA[i] = 0x20 + i++ + if m.ProgressNotify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Filters) > 0 { + dAtA25 := make([]byte, len(m.Filters)*10) + var j24 int + for _, num := range m.Filters { + for num >= 1<<7 { + dAtA25[j24] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j24++ + } + dAtA25[j24] = uint8(num) + j24++ + } + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(j24)) + i += copy(dAtA[i:], dAtA25[:j24]) + } + if m.PrevKv { + dAtA[i] = 0x30 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + return i, nil +} + +func (m *WatchResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n26, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.WatchId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Created { + dAtA[i] = 0x18 + i++ + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Canceled { + dAtA[i] = 0x20 + i++ + if m.Canceled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CompactRevision != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x5a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TTL != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if len(m.Error) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n29, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Keys { + dAtA[i] = 0x10 + i++ + if m.Keys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n30, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + 
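
The MarshalTo bodies in this stretch are hand-unrolled proto3 encoding: each present field is written as a key byte (field number << 3 | wire type) followed by either a varint or a length-prefixed payload, with encodeVarintRpc emitting base-128 varints. So 0x08 opens field 1 as a varint, 0x12 opens field 2 as length-delimited bytes, and an ID of 1000 in field 2 encodes as 0x10 0xE8 0x07. A self-contained sketch of the varint logic (putVarint is an assumed stand-in, not the generated encodeVarintRpc):

    // putVarint writes v at b[i] in base-128, low 7 bits first, with the
    // continuation bit 0x80 set on every byte but the last, and returns the
    // next write offset.
    func putVarint(b []byte, i int, v uint64) int {
    	for v >= 1<<7 {
    		b[i] = uint8(v&0x7f | 0x80)
    		v >>= 7
    		i++
    	}
    	b[i] = uint8(v)
    	return i + 1
    }
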
if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + return i, nil +} + +func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.Leases) > 0 { + for _, msg := range m.Leases { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Member) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Member) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n32, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + if m.Member != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) + n33, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n35, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MemberListRequest) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n37, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TargetID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) + } + return i, nil +} + +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) + } + if m.MemberID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m 
*AlarmMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MemberID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) + } + if m.Alarm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) + } + return i, nil +} + +func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n39, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Alarms) > 0 { + for _, msg := range m.Alarms { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n40, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.DbSize != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) + } + if m.Leader != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) + } + if m.RaftIndex != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) + } + return i, nil +} + +func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
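For reference: every message in this generated file follows the same gogo/protobuf pattern. Size() computes the exact encoded byte count, Marshal() allocates a buffer of that size, and MarshalTo() writes each set field as a tag byte — (field_number << 3) | wire_type, which is where literals like 0x8, 0x12, and 0x1a come from — followed by a varint or a length-prefixed payload. The minimal standalone sketch below (hypothetical names, not part of the generated file or this diff) reimplements just the base-128 varint encode/decode loops, i.e. the same logic as encodeVarintRpc/sovRpc and the shift-accumulate loops in the Unmarshal methods further down:

package main

import "fmt"

// encodeVarint appends v in protobuf base-128 form: low 7 bits per byte,
// high bit set on every byte except the last (cf. encodeVarintRpc).
func encodeVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// decodeVarint reads one varint and reports how many bytes it consumed,
// mirroring the shift-accumulate loops in the Unmarshal methods.
func decodeVarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	// Field 1, wire type 0 (varint) => tag byte 0x8, exactly what
	// LeaseRevokeRequest.MarshalTo emits before its ID value.
	msg := encodeVarint([]byte{0x8}, 300)
	fmt.Printf("encoded: % x\n", msg) // 08 ac 02

	tag, n := decodeVarint(msg)
	fmt.Println("field:", tag>>3, "wiretype:", tag&0x7) // field: 1 wiretype: 0
	id, _ := decodeVarint(msg[n:])
	fmt.Println("ID:", id) // ID: 300
}
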
+ +func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) + } + return i, nil +} + +func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + 
_ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + return i, nil +} + +func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Perm != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) + n41, err := m.Perm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], 
m.Role) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) + } + return i, nil +} + +func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n42, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + return i, nil +} + +func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n44, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if len(m.Token) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + return i, nil +} + +func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n45, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + return i, nil +} + +func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n46, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err 
+ } + return dAtA[:n], nil +} + +func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n47, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + return i, nil +} + +func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + return i, nil +} + +func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n49, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + return i, nil +} + +func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + return i, nil +} + +func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n51, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n52, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + } + if len(m.Perm) > 0 { + for _, msg := range m.Perm { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n53, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n54, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + if len(m.Users) > 0 { + for _, s := range m.Users { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n55, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + } + return i, nil +} + +func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n56, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + } + return i, nil +} + +func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n57, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + return i, nil +} + +func encodeFixed64Rpc(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Rpc(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 
4 +} +func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ResponseHeader) Size() (n int) { + var l int + _ = l + if m.ClusterId != 0 { + n += 1 + sovRpc(uint64(m.ClusterId)) + } + if m.MemberId != 0 { + n += 1 + sovRpc(uint64(m.MemberId)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + return n +} + +func (m *RangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovRpc(uint64(m.Limit)) + } + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.SortOrder != 0 { + n += 1 + sovRpc(uint64(m.SortOrder)) + } + if m.SortTarget != 0 { + n += 1 + sovRpc(uint64(m.SortTarget)) + } + if m.Serializable { + n += 2 + } + if m.KeysOnly { + n += 2 + } + if m.CountOnly { + n += 2 + } + if m.MinModRevision != 0 { + n += 1 + sovRpc(uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxCreateRevision)) + } + return n +} + +func (m *RangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.More { + n += 2 + } + if m.Count != 0 { + n += 1 + sovRpc(uint64(m.Count)) + } + return n +} + +func (m *PutRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovRpc(uint64(m.Lease)) + } + if m.PrevKv { + n += 2 + } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } + return n +} + +func (m *PutResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *DeleteRangeRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.PrevKv { + n += 2 + } + return n +} + +func (m *DeleteRangeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Deleted != 0 { + n += 1 + sovRpc(uint64(m.Deleted)) + } + if len(m.PrevKvs) > 0 { + for _, e := range m.PrevKvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *RequestOp) Size() (n int) { + var l int + _ = l + if m.Request != nil { + n += m.Request.Size() + } + return n +} + +func (m *RequestOp_RequestRange) Size() (n int) { + var l int + _ = l + if m.RequestRange != nil { + l = m.RequestRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestPut) Size() (n int) { + var l int + _ = l + if m.RequestPut != nil { + l = m.RequestPut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestDeleteRange) Size() (n int) { + var l int + _ = l + if 
m.RequestDeleteRange != nil { + l = m.RequestDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *RequestOp_RequestTxn) Size() (n int) { + var l int + _ = l + if m.RequestTxn != nil { + l = m.RequestTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp) Size() (n int) { + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + return n +} + +func (m *ResponseOp_ResponseRange) Size() (n int) { + var l int + _ = l + if m.ResponseRange != nil { + l = m.ResponseRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponsePut) Size() (n int) { + var l int + _ = l + if m.ResponsePut != nil { + l = m.ResponsePut.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { + var l int + _ = l + if m.ResponseDeleteRange != nil { + l = m.ResponseDeleteRange.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *ResponseOp_ResponseTxn) Size() (n int) { + var l int + _ = l + if m.ResponseTxn != nil { + l = m.ResponseTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare) Size() (n int) { + var l int + _ = l + if m.Result != 0 { + n += 1 + sovRpc(uint64(m.Result)) + } + if m.Target != 0 { + n += 1 + sovRpc(uint64(m.Target)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.TargetUnion != nil { + n += m.TargetUnion.Size() + } + l = len(m.RangeEnd) + if l > 0 { + n += 2 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *Compare_Version) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Version)) + return n +} +func (m *Compare_CreateRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.CreateRevision)) + return n +} +func (m *Compare_ModRevision) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.ModRevision)) + return n +} +func (m *Compare_Value) Size() (n int) { + var l int + _ = l + if m.Value != nil { + l = len(m.Value) + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *Compare_Lease) Size() (n int) { + var l int + _ = l + n += 1 + sovRpc(uint64(m.Lease)) + return n +} +func (m *TxnRequest) Size() (n int) { + var l int + _ = l + if len(m.Compare) > 0 { + for _, e := range m.Compare { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Success) > 0 { + for _, e := range m.Success { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.Failure) > 0 { + for _, e := range m.Failure { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *TxnResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Succeeded { + n += 2 + } + if len(m.Responses) > 0 { + for _, e := range m.Responses { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *CompactionRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + if m.Physical { + n += 2 + } + return n +} + +func (m *CompactionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *HashRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *HashKVRequest) Size() (n int) { + var l int + _ = l + if m.Revision != 0 { + n += 1 + sovRpc(uint64(m.Revision)) + } + return n +} + +func (m *HashKVResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + 
l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + return n +} + +func (m *HashResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Hash != 0 { + n += 1 + sovRpc(uint64(m.Hash)) + } + return n +} + +func (m *SnapshotRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SnapshotResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.RemainingBytes != 0 { + n += 1 + sovRpc(uint64(m.RemainingBytes)) + } + l = len(m.Blob) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if m.RequestUnion != nil { + n += m.RequestUnion.Size() + } + return n +} + +func (m *WatchRequest_CreateRequest) Size() (n int) { + var l int + _ = l + if m.CreateRequest != nil { + l = m.CreateRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchRequest_CancelRequest) Size() (n int) { + var l int + _ = l + if m.CancelRequest != nil { + l = m.CancelRequest.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *WatchCreateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.StartRevision != 0 { + n += 1 + sovRpc(uint64(m.StartRevision)) + } + if m.ProgressNotify { + n += 2 + } + if len(m.Filters) > 0 { + l = 0 + for _, e := range m.Filters { + l += sovRpc(uint64(e)) + } + n += 1 + sovRpc(uint64(l)) + l + } + if m.PrevKv { + n += 2 + } + return n +} + +func (m *WatchCancelRequest) Size() (n int) { + var l int + _ = l + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + return n +} + +func (m *WatchResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.WatchId != 0 { + n += 1 + sovRpc(uint64(m.WatchId)) + } + if m.Created { + n += 2 + } + if m.Canceled { + n += 2 + } + if m.CompactRevision != 0 { + n += 1 + sovRpc(uint64(m.CompactRevision)) + } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseGrantRequest) Size() (n int) { + var l int + _ = l + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseGrantResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseRevokeRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *LeaseKeepAliveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseKeepAliveResponse) Size() (n int) { + var l int + _ 
= l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + return n +} + +func (m *LeaseTimeToLiveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Keys { + n += 2 + } + return n +} + +func (m *LeaseTimeToLiveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + n += 1 + sovRpc(uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *LeaseLeasesRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *LeaseStatus) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseLeasesResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *Member) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberAddRequest) Size() (n int) { + var l int + _ = l + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.Member != nil { + l = m.Member.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberRemoveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *MemberRemoveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if len(m.PeerURLs) > 0 { + for _, s := range m.PeerURLs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberUpdateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *MemberListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *MemberListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + 
n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *DefragmentRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *DefragmentResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *MoveLeaderRequest) Size() (n int) { + var l int + _ = l + if m.TargetID != 0 { + n += 1 + sovRpc(uint64(m.TargetID)) + } + return n +} + +func (m *MoveLeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AlarmRequest) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRpc(uint64(m.Action)) + } + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmMember) Size() (n int) { + var l int + _ = l + if m.MemberID != 0 { + n += 1 + sovRpc(uint64(m.MemberID)) + } + if m.Alarm != 0 { + n += 1 + sovRpc(uint64(m.Alarm)) + } + return n +} + +func (m *AlarmResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Alarms) > 0 { + for _, e := range m.Alarms { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *StatusRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StatusResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.DbSize != 0 { + n += 1 + sovRpc(uint64(m.DbSize)) + } + if m.Leader != 0 { + n += 1 + sovRpc(uint64(m.Leader)) + } + if m.RaftIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftIndex)) + } + if m.RaftTerm != 0 { + n += 1 + sovRpc(uint64(m.RaftTerm)) + } + return n +} + +func (m *AuthEnableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthDisableRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthenticateRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddRequest) Size() (n 
int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleListRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *AuthRoleDeleteRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + if m.Perm != nil { + l = m.Perm.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.Role) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthEnableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthDisableResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthenticateResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthUserDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserChangePasswordResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserGrantRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthUserRevokeRoleResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleAddResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGetResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Perm) > 0 { + for _, e := range m.Perm { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m 
*AuthUserListResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + +func (m *AuthRoleDeleteResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleGrantPermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func (m *AuthRoleRevokePermissionResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + +func sovRpc(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRpc(x uint64) (n int) { + return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResponseHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + m.ClusterId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) + } + m.MemberId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeRequest) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) + } + m.SortOrder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) + } + m.SortTarget = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Serializable = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.KeysOnly = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CountOnly = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) + } + m.MinModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) + } + m.MaxModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) + } + m.MinCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) + } + m.MaxCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.More = bool(v != 0) + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &mvccpb.KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + m.Deleted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Deleted |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) + if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOp) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestPut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l 
{ + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PutResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponsePut{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteRangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseDeleteRange{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseTxn{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Compare) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Compare: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= (Compare_CompareResult(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + m.Target = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Version{v} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_CreateRevision{v} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_ModRevision{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.TargetUnion = &Compare_Value{v} + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetUnion = &Compare_Lease{v} + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Compare = append(m.Compare, &Compare{}) + if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Success = append(m.Success, &RequestOp{}) + if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Failure = append(m.Failure, &RequestOp{}) + if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Succeeded = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Responses = append(m.Responses, &ResponseOp{}) + if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Physical = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashKVResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) + } + m.RemainingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingBytes |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Blob == nil { + m.Blob = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCreateRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CreateRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WatchCancelRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RequestUnion = &WatchRequest_CancelRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) + } + m.StartRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressNotify = bool(v != 0) + case 5: + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = 
bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) + } + m.WatchId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WatchId |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var v int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + 
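+ // A 64-bit value fits in at most ten base-128 groups, so reaching shift == 64
+ // means the varint is malformed; the generated decoder fails with
+ // ErrIntOverflowRpc rather than letting the accumulated value silently wrap.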
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + 
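+ // Unknown fields are skipped rather than rejected: iNdEx is rewound to the
+ // start of the tag and skipRpc measures the full wire size of the field so
+ // decoding resumes immediately after it (the bounds check below guards the
+ // advance). A minimal sketch of the same idea for the fixed-width wire types,
+ // using a hypothetical helper that is not part of the generated file:
+ //
+ //	func skipField(b []byte, wireType int) (n int, err error) {
+ //		switch wireType {
+ //		case 0: // varint: consume bytes until the high bit is clear
+ //			for n < len(b) {
+ //				n++
+ //				if b[n-1] < 0x80 {
+ //					return n, nil
+ //				}
+ //			}
+ //		case 1: // 64-bit fixed; the caller bounds-checks the advance
+ //			return 8, nil
+ //		case 5: // 32-bit fixed
+ //			return 4, nil
+ //		}
+ //		return 0, io.ErrUnexpectedEOF
+ //	}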
skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } 
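+ // The varint decoded by this loop is the field tag, laid out as
+ // key = (field_number << 3) | wire_type. LeaseKeepAliveRequest's ID is
+ // field 1 with wire type 0 (varint), so its tag encodes as the single byte
+ // 0x08; the switch further down dispatches on fieldNum and validates
+ // wireType before reading the payload.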
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) + } + m.GrantedTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GrantedTTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, &LeaseStatus{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Member) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Member: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Member == nil { + m.Member = &Member{} + } + if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) + } + m.TargetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) + } + m.MemberID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemberID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) + } + m.Alarm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Alarm |= (AlarmType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlarmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alarms = append(m.Alarms, &AlarmMember{}) + if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) + } + m.DbSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSize |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Perm == nil { + m.Perm = &authpb.Permission{} + } + if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = 
&ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Perm = append(m.Perm, &authpb.Permission{}) + if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { 
+ return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRpc(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRpc + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRpc + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRpc(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } + +var fileDescriptorRpc = []byte{ + // 3674 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x1b, 0xc7, + 0x77, 0xd7, 0x92, 0x22, 0x29, 0x1e, 0x5e, 0x44, 0x8d, 0x64, 0x9b, 0xa2, 0x6d, 0x59, 0x1e, 0xdf, + 0x64, 0x3b, 0x16, 0xff, 0x7f, 0x25, 0xed, 0x83, 0x5b, 0x04, 0x91, 0x25, 0xc6, 0x52, 0x24, 0x4b, + 0xca, 0x8a, 0x56, 0x52, 0x20, 0x28, 0xb1, 0x22, 0xc7, 0xd2, 0x42, 0xe4, 0x2e, 0xb3, 0xbb, 0xa4, + 0xa5, 0x34, 0x2d, 0x8a, 0x20, 0x41, 0xd1, 0x02, 0x7d, 0x69, 0x1e, 0x7a, 0x7b, 0x2c, 0x8a, 0x22, + 0x2f, 0x7d, 0x2b, 0xfa, 0x15, 0x8a, 0xbe, 0xb4, 0x40, 0xbf, 0x40, 0x91, 0xf6, 0xa5, 0xdf, 0xa1, + 0x45, 0xff, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xc2, 0x24, 0x2f, 0xf2, 0xce, 0x99, 0x33, + 0xe7, 0x9c, 0x39, 0x33, 0xe7, 0x9c, 0x99, 0xdf, 0xd0, 0x90, 0x77, 0xfa, 0xed, 0xd5, 0xbe, 0x63, + 0x7b, 0x36, 0x2a, 0x12, 0xaf, 0xdd, 0x71, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb8, 0xb6, 0x70, 0x62, + 0x9f, 0xd8, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x91, 0xf2, 0xd4, 0x7b, 0xc3, 0x76, 0x9b, + 0xfd, 0xe9, 0x1f, 0xd7, 0xcf, 0x86, 0xa2, 0xeb, 0x26, 0xeb, 0x32, 0x06, 0xde, 0x29, 0xfb, 0xd3, + 0x3f, 0x66, 0xff, 0x88, 0xce, 0x5b, 0x27, 0xb6, 0x7d, 0xd2, 0x25, 0x75, 0xa3, 0x6f, 0xd6, 0x0d, + 0xcb, 0xb2, 0x3d, 0xc3, 0x33, 0x6d, 0xcb, 0xe5, 0xbd, 0xf8, 0x1b, 0x0d, 0xca, 0x3a, 0x71, 0xfb, + 0xb6, 0xe5, 0x92, 0x2d, 0x62, 0x74, 0x88, 0x83, 0x6e, 0x03, 0xb4, 0xbb, 0x03, 0xd7, 0x23, 0x4e, + 0xcb, 0xec, 0x54, 0xb5, 0x65, 0x6d, 0x65, 0x5a, 0xcf, 0x0b, 0xca, 0x76, 0x07, 0xdd, 0x84, 0x7c, + 0x8f, 0xf4, 0x8e, 0x79, 0x6f, 0x8a, 0xf5, 0xce, 0x70, 0xc2, 0x76, 0x07, 0xd5, 0x60, 0xc6, 0x21, + 0x43, 0xd3, 0x35, 0x6d, 0xab, 0x9a, 0x5e, 0xd6, 0x56, 0xd2, 0xba, 0xdf, 0xa6, 0x03, 0x1d, 0xe3, + 0x8d, 0xd7, 0xf2, 0x88, 0xd3, 0xab, 0x4e, 0xf3, 0x81, 0x94, 0xd0, 0x24, 0x4e, 0x0f, 0x7f, 0x9d, + 0x81, 0xa2, 0x6e, 0x58, 0x27, 0x44, 0x27, 0x9f, 0x0f, 0x88, 0xeb, 0xa1, 0x0a, 0xa4, 0xcf, 0xc8, + 0x05, 0x53, 0x5f, 0xd4, 0xe9, 0x27, 0x1f, 0x6f, 0x9d, 0x90, 0x16, 0xb1, 0xb8, 0xe2, 0x22, 0x1d, + 0x6f, 0x9d, 0x90, 0x86, 0xd5, 0x41, 0x0b, 0x90, 0xe9, 0x9a, 0x3d, 0xd3, 0x13, 0x5a, 0x79, 0x23, + 0x64, 0xce, 0x74, 0xc4, 0x9c, 0x0d, 0x00, 0xd7, 0x76, 0xbc, 0x96, 0xed, 0x74, 0x88, 0x53, 0xcd, + 0x2c, 0x6b, 0x2b, 0xe5, 0xb5, 0xfb, 0xab, 0xea, 0x42, 0xac, 0xaa, 0x06, 0xad, 0x1e, 0xda, 0x8e, + 0xb7, 0x4f, 0x79, 0xf5, 0xbc, 0x2b, 0x3f, 0xd1, 0x87, 0x50, 0x60, 0x42, 0x3c, 0xc3, 0x39, 0x21, + 0x5e, 0x35, 0xcb, 0xa4, 0x3c, 0xb8, 0x44, 0x4a, 0x93, 0x31, 0xeb, 0x4c, 0x3d, 0xff, 0x46, 0x18, + 0x8a, 0x2e, 0x71, 0x4c, 0xa3, 0x6b, 0x7e, 0x61, 0x1c, 0x77, 0x49, 0x35, 0xb7, 0xac, 0xad, 0xcc, + 0xe8, 0x21, 0x1a, 0x9d, 0xff, 0x19, 0xb9, 0x70, 0x5b, 0xb6, 0xd5, 0xbd, 0xa8, 0xce, 0x30, 0x86, + 0x19, 0x4a, 0xd8, 0xb7, 0xba, 0x17, 0x6c, 0xd1, 0xec, 0x81, 0xe5, 0xf1, 0xde, 0x3c, 0xeb, 0xcd, + 0x33, 0x0a, 0xeb, 0x5e, 0x81, 0x4a, 0xcf, 0xb4, 0x5a, 0x3d, 0xbb, 0xd3, 0xf2, 0x1d, 0x02, 0xcc, + 0x21, 0xe5, 0x9e, 0x69, 0xbd, 0xb2, 0x3b, 0xba, 0x74, 0x0b, 0xe5, 0x34, 0xce, 0xc3, 0x9c, 0x05, + 0xc1, 0x69, 0x9c, 0xab, 0x9c, 0xab, 0x30, 0x4f, 0x65, 0xb6, 0x1d, 0x62, 0x78, 0x24, 0x60, 0x2e, + 0x32, 0xe6, 0xb9, 0x9e, 0x69, 0x6d, 0xb0, 0x9e, 0x10, 0xbf, 0x71, 0x3e, 0xc2, 0x5f, 0x12, 0xfc, + 0xc6, 0x79, 0x98, 0x1f, 0xaf, 0x42, 0xde, 0xf7, 0x39, 0x9a, 0x81, 0xe9, 0xbd, 0xfd, 0xbd, 0x46, + 0x65, 0x0a, 0x01, 0x64, 0xd7, 0x0f, 0x37, 0x1a, 0x7b, 0x9b, 0x15, 0x0d, 0x15, 0x20, 0xb7, 0xd9, + 0xe0, 
0x8d, 0x14, 0x7e, 0x01, 0x10, 0x78, 0x17, 0xe5, 0x20, 0xbd, 0xd3, 0xf8, 0xbd, 0xca, 0x14, + 0xe5, 0x39, 0x6a, 0xe8, 0x87, 0xdb, 0xfb, 0x7b, 0x15, 0x8d, 0x0e, 0xde, 0xd0, 0x1b, 0xeb, 0xcd, + 0x46, 0x25, 0x45, 0x39, 0x5e, 0xed, 0x6f, 0x56, 0xd2, 0x28, 0x0f, 0x99, 0xa3, 0xf5, 0xdd, 0xd7, + 0x8d, 0xca, 0x34, 0xfe, 0x56, 0x83, 0x92, 0x58, 0x2f, 0x1e, 0x13, 0xe8, 0x3d, 0xc8, 0x9e, 0xb2, + 0xb8, 0x60, 0x5b, 0xb1, 0xb0, 0x76, 0x2b, 0xb2, 0xb8, 0xa1, 0xd8, 0xd1, 0x05, 0x2f, 0xc2, 0x90, + 0x3e, 0x1b, 0xba, 0xd5, 0xd4, 0x72, 0x7a, 0xa5, 0xb0, 0x56, 0x59, 0xe5, 0x01, 0xbb, 0xba, 0x43, + 0x2e, 0x8e, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xee, 0xd9, 0x0e, 0x61, 0x3b, 0x76, + 0x46, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0x77, 0x1a, 0xc0, 0xc1, + 0xc0, 0x4b, 0x0e, 0x8d, 0x05, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62, 0x82, 0x18, + 0x2e, 0xf1, 0x63, 0x82, 0x36, 0xd0, 0x0d, 0xc8, 0xf5, 0x1d, 0x32, 0x6c, 0x9d, 0x0d, 0x99, 0x92, + 0x19, 0x3d, 0x4b, 0x9b, 0x3b, 0x43, 0x74, 0x17, 0x8a, 0xe6, 0x89, 0x65, 0x3b, 0xa4, 0xc5, 0x65, + 0x65, 0x58, 0x6f, 0x81, 0xd3, 0x98, 0xdd, 0x0a, 0x0b, 0x17, 0x9c, 0x55, 0x59, 0x76, 0x29, 0x09, + 0x5b, 0x50, 0x60, 0xa6, 0x4e, 0xe4, 0xbe, 0xc7, 0x81, 0x8d, 0x29, 0x36, 0x6c, 0xd4, 0x85, 0xc2, + 0x6a, 0xfc, 0x19, 0xa0, 0x4d, 0xd2, 0x25, 0x1e, 0x99, 0x24, 0x7b, 0x28, 0x3e, 0x49, 0xab, 0x3e, + 0xc1, 0x7f, 0xa1, 0xc1, 0x7c, 0x48, 0xfc, 0x44, 0xd3, 0xaa, 0x42, 0xae, 0xc3, 0x84, 0x71, 0x0b, + 0xd2, 0xba, 0x6c, 0xa2, 0xa7, 0x30, 0x23, 0x0c, 0x70, 0xab, 0xe9, 0x84, 0x4d, 0x93, 0xe3, 0x36, + 0xb9, 0xf8, 0xbb, 0x14, 0xe4, 0xc5, 0x44, 0xf7, 0xfb, 0x68, 0x1d, 0x4a, 0x0e, 0x6f, 0xb4, 0xd8, + 0x7c, 0x84, 0x45, 0xb5, 0xe4, 0x24, 0xb4, 0x35, 0xa5, 0x17, 0xc5, 0x10, 0x46, 0x46, 0xbf, 0x03, + 0x05, 0x29, 0xa2, 0x3f, 0xf0, 0x84, 0xcb, 0xab, 0x61, 0x01, 0xc1, 0xfe, 0xdb, 0x9a, 0xd2, 0x41, + 0xb0, 0x1f, 0x0c, 0x3c, 0xd4, 0x84, 0x05, 0x39, 0x98, 0xcf, 0x46, 0x98, 0x91, 0x66, 0x52, 0x96, + 0xc3, 0x52, 0x46, 0x97, 0x6a, 0x6b, 0x4a, 0x47, 0x62, 0xbc, 0xd2, 0xa9, 0x9a, 0xe4, 0x9d, 0xf3, + 0xe4, 0x3d, 0x62, 0x52, 0xf3, 0xdc, 0x1a, 0x35, 0xa9, 0x79, 0x6e, 0xbd, 0xc8, 0x43, 0x4e, 0xb4, + 0xf0, 0x3f, 0xa7, 0x00, 0xe4, 0x6a, 0xec, 0xf7, 0xd1, 0x26, 0x94, 0x1d, 0xd1, 0x0a, 0x79, 0xeb, + 0x66, 0xac, 0xb7, 0xc4, 0x22, 0x4e, 0xe9, 0x25, 0x39, 0x88, 0x1b, 0xf7, 0x3e, 0x14, 0x7d, 0x29, + 0x81, 0xc3, 0x16, 0x63, 0x1c, 0xe6, 0x4b, 0x28, 0xc8, 0x01, 0xd4, 0x65, 0x9f, 0xc0, 0x35, 0x7f, + 0x7c, 0x8c, 0xcf, 0xee, 0x8e, 0xf1, 0x99, 0x2f, 0x70, 0x5e, 0x4a, 0x50, 0xbd, 0xa6, 0x1a, 0x16, + 0xb8, 0x6d, 0x31, 0xc6, 0x6d, 0xa3, 0x86, 0x51, 0xc7, 0x01, 0xad, 0x97, 0xbc, 0x89, 0xff, 0x27, + 0x0d, 0xb9, 0x0d, 0xbb, 0xd7, 0x37, 0x1c, 0xba, 0x1a, 0x59, 0x87, 0xb8, 0x83, 0xae, 0xc7, 0xdc, + 0x55, 0x5e, 0xbb, 0x17, 0x96, 0x28, 0xd8, 0xe4, 0xbf, 0x3a, 0x63, 0xd5, 0xc5, 0x10, 0x3a, 0x58, + 0x94, 0xc7, 0xd4, 0x15, 0x06, 0x8b, 0xe2, 0x28, 0x86, 0xc8, 0x40, 0x4e, 0x07, 0x81, 0x5c, 0x83, + 0xdc, 0x90, 0x38, 0x41, 0x49, 0xdf, 0x9a, 0xd2, 0x25, 0x01, 0x3d, 0x86, 0xd9, 0x68, 0x79, 0xc9, + 0x08, 0x9e, 0x72, 0x3b, 0x5c, 0x8d, 0xee, 0x41, 0x31, 0x54, 0xe3, 0xb2, 0x82, 0xaf, 0xd0, 0x53, + 0x4a, 0xdc, 0x75, 0x99, 0x57, 0x69, 0x3d, 0x2e, 0x6e, 0x4d, 0xc9, 0xcc, 0x7a, 0x5d, 0x66, 0xd6, + 0x19, 0x31, 0x4a, 0xe4, 0xd6, 0x50, 0x92, 0xf9, 0x20, 0x9c, 0x64, 0xf0, 0x07, 0x50, 0x0a, 0x39, + 0x88, 0xd6, 0x9d, 0xc6, 0xc7, 0xaf, 0xd7, 0x77, 0x79, 0x91, 0x7a, 0xc9, 0xea, 0x92, 0x5e, 0xd1, + 0x68, 0xad, 0xdb, 0x6d, 0x1c, 0x1e, 0x56, 0x52, 0xa8, 0x04, 0xf9, 0xbd, 0xfd, 0x66, 0x8b, 0x73, + 0xa5, 0xf1, 0x4b, 0x5f, 0x82, 
0x28, 0x72, 0x4a, 0x6d, 0x9b, 0x52, 0x6a, 0x9b, 0x26, 0x6b, 0x5b, + 0x2a, 0xa8, 0x6d, 0xac, 0xcc, 0xed, 0x36, 0xd6, 0x0f, 0x1b, 0x95, 0xe9, 0x17, 0x65, 0x28, 0x72, + 0xff, 0xb6, 0x06, 0x16, 0x2d, 0xb5, 0x7f, 0xa7, 0x01, 0x04, 0xd1, 0x84, 0xea, 0x90, 0x6b, 0x73, + 0x3d, 0x55, 0x8d, 0x25, 0xa3, 0x6b, 0xb1, 0x4b, 0xa6, 0x4b, 0x2e, 0xf4, 0x6b, 0xc8, 0xb9, 0x83, + 0x76, 0x9b, 0xb8, 0xb2, 0xe4, 0xdd, 0x88, 0xe6, 0x43, 0x91, 0xad, 0x74, 0xc9, 0x47, 0x87, 0xbc, + 0x31, 0xcc, 0xee, 0x80, 0x15, 0xc0, 0xf1, 0x43, 0x04, 0x1f, 0xfe, 0x6b, 0x0d, 0x0a, 0xca, 0xe6, + 0xfd, 0x91, 0x49, 0xf8, 0x16, 0xe4, 0x99, 0x0d, 0xa4, 0x23, 0xd2, 0xf0, 0x8c, 0x1e, 0x10, 0xd0, + 0x6f, 0x43, 0x5e, 0x46, 0x80, 0xcc, 0xc4, 0xd5, 0x78, 0xb1, 0xfb, 0x7d, 0x3d, 0x60, 0xc5, 0x3b, + 0x30, 0xc7, 0xbc, 0xd2, 0xa6, 0x87, 0x6b, 0xe9, 0x47, 0xf5, 0xf8, 0xa9, 0x45, 0x8e, 0x9f, 0x35, + 0x98, 0xe9, 0x9f, 0x5e, 0xb8, 0x66, 0xdb, 0xe8, 0x0a, 0x2b, 0xfc, 0x36, 0xfe, 0x08, 0x90, 0x2a, + 0x6c, 0x92, 0xe9, 0xe2, 0x12, 0x14, 0xb6, 0x0c, 0xf7, 0x54, 0x98, 0x84, 0x9f, 0x42, 0x89, 0x36, + 0x77, 0x8e, 0xae, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0x89, 0x7c, 0x8e, 0x60, 0xfa, 0xd4, 0x70, + 0x4f, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x8f, 0xa1, 0xd2, 0xe6, 0x93, 0x6c, 0x45, 0xae, 0x0c, + 0xb3, 0x82, 0xee, 0x9f, 0x04, 0x3f, 0x85, 0x22, 0x9f, 0xc3, 0x4f, 0x6d, 0x04, 0x9e, 0x83, 0xd9, + 0x43, 0xcb, 0xe8, 0xbb, 0xa7, 0xb6, 0xac, 0x6e, 0x74, 0xd2, 0x95, 0x80, 0x36, 0x91, 0xc6, 0x47, + 0x30, 0xeb, 0x90, 0x9e, 0x61, 0x5a, 0xa6, 0x75, 0xd2, 0x3a, 0xbe, 0xf0, 0x88, 0x2b, 0x2e, 0x4c, + 0x65, 0x9f, 0xfc, 0x82, 0x52, 0xa9, 0x69, 0xc7, 0x5d, 0xfb, 0x58, 0xa4, 0x39, 0xf6, 0x8d, 0xff, + 0x49, 0x83, 0xe2, 0x27, 0x86, 0xd7, 0x96, 0x4b, 0x87, 0xb6, 0xa1, 0xec, 0x27, 0x37, 0x46, 0x11, + 0xb6, 0x44, 0x4a, 0x2c, 0x1b, 0x23, 0x8f, 0xd2, 0xb2, 0x3a, 0x96, 0xda, 0x2a, 0x81, 0x89, 0x32, + 0xac, 0x36, 0xe9, 0xfa, 0xa2, 0x52, 0xc9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, 0xbc, 0x98, 0x0d, + 0x8e, 0x1f, 0x3c, 0x97, 0xfc, 0x4d, 0x0a, 0xd0, 0xa8, 0x0d, 0x3f, 0xf4, 0x44, 0xf6, 0x00, 0xca, + 0xae, 0x67, 0x38, 0x23, 0x7b, 0xa3, 0xc4, 0xa8, 0x7e, 0x82, 0x7e, 0x04, 0xb3, 0x7d, 0xc7, 0x3e, + 0x71, 0x88, 0xeb, 0xb6, 0x2c, 0xdb, 0x33, 0xdf, 0x5c, 0x88, 0x43, 0x6d, 0x59, 0x92, 0xf7, 0x18, + 0x15, 0x35, 0x20, 0xf7, 0xc6, 0xec, 0x7a, 0xc4, 0x71, 0xab, 0x99, 0xe5, 0xf4, 0x4a, 0x79, 0xed, + 0xe9, 0x65, 0x5e, 0x5b, 0xfd, 0x90, 0xf1, 0x37, 0x2f, 0xfa, 0x44, 0x97, 0x63, 0xd5, 0x83, 0x62, + 0x36, 0x74, 0x50, 0x7c, 0x00, 0x10, 0xf0, 0xd3, 0x54, 0xbb, 0xb7, 0x7f, 0xf0, 0xba, 0x59, 0x99, + 0x42, 0x45, 0x98, 0xd9, 0xdb, 0xdf, 0x6c, 0xec, 0x36, 0x68, 0x5e, 0xc6, 0x75, 0xe9, 0x1b, 0xd5, + 0x87, 0x68, 0x11, 0x66, 0xde, 0x52, 0xaa, 0xbc, 0x6f, 0xa7, 0xf5, 0x1c, 0x6b, 0x6f, 0x77, 0xf0, + 0x9f, 0xa7, 0xa0, 0x24, 0x76, 0xc1, 0x44, 0x5b, 0x51, 0x55, 0x91, 0x0a, 0xa9, 0xa0, 0xa7, 0x52, + 0xbe, 0x3b, 0x3a, 0xe2, 0xf0, 0x2b, 0x9b, 0x34, 0x37, 0xf0, 0xc5, 0x26, 0x1d, 0xe1, 0x56, 0xbf, + 0x1d, 0x1b, 0xbe, 0x99, 0xd8, 0xf0, 0x45, 0xf7, 0xa0, 0xe4, 0xef, 0x36, 0xc3, 0x15, 0xb5, 0x36, + 0xaf, 0x17, 0xe5, 0x46, 0xa2, 0x34, 0xf4, 0x00, 0xb2, 0x64, 0x48, 0x2c, 0xcf, 0xad, 0x16, 0x58, + 0xd6, 0x2d, 0xc9, 0xf3, 0x6f, 0x83, 0x52, 0x75, 0xd1, 0x89, 0x7f, 0x0b, 0xe6, 0xd8, 0x3d, 0xe3, + 0xa5, 0x63, 0x58, 0xea, 0x85, 0xa8, 0xd9, 0xdc, 0x15, 0xae, 0xa3, 0x9f, 0xa8, 0x0c, 0xa9, 0xed, + 0x4d, 0x31, 0xd1, 0xd4, 0xf6, 0x26, 0xfe, 0x4a, 0x03, 0xa4, 0x8e, 0x9b, 0xc8, 0x97, 0x11, 0xe1, + 0x52, 0x7d, 0x3a, 0x50, 0xbf, 0x00, 0x19, 0xe2, 0x38, 0xb6, 0xc3, 0xbc, 0x96, 0xd7, 0x79, 0x03, + 0xdf, 0x17, 0x36, 0xe8, 0x64, 0x68, 0x9f, 0xf9, 0x81, 
0xc1, 0xa5, 0x69, 0xbe, 0xa9, 0x3b, 0x30, + 0x1f, 0xe2, 0x9a, 0x28, 0xfb, 0x3f, 0x82, 0x6b, 0x4c, 0xd8, 0x0e, 0x21, 0xfd, 0xf5, 0xae, 0x39, + 0x4c, 0xd4, 0xda, 0x87, 0xeb, 0x51, 0xc6, 0x9f, 0xd7, 0x47, 0xf8, 0x77, 0x85, 0xc6, 0xa6, 0xd9, + 0x23, 0x4d, 0x7b, 0x37, 0xd9, 0x36, 0x9a, 0x1d, 0xcf, 0xc8, 0x85, 0x2b, 0xca, 0x24, 0xfb, 0xc6, + 0x7f, 0xaf, 0xc1, 0x8d, 0x91, 0xe1, 0x3f, 0xf3, 0xaa, 0x2e, 0x01, 0x9c, 0xd0, 0xed, 0x43, 0x3a, + 0xb4, 0x83, 0xdf, 0xd0, 0x15, 0x8a, 0x6f, 0x27, 0x4d, 0x30, 0x45, 0x61, 0xe7, 0x82, 0x58, 0x73, + 0xf6, 0xc7, 0x95, 0x35, 0xe6, 0x36, 0x14, 0x18, 0xe1, 0xd0, 0x33, 0xbc, 0x81, 0x3b, 0xb2, 0x18, + 0x7f, 0x24, 0xb6, 0x80, 0x1c, 0x34, 0xd1, 0xbc, 0x7e, 0x0d, 0x59, 0x76, 0x38, 0x95, 0x47, 0xb3, + 0xc8, 0x6d, 0x40, 0xb1, 0x43, 0x17, 0x8c, 0xf8, 0x14, 0xb2, 0xaf, 0x18, 0xa2, 0xa7, 0x58, 0x36, + 0x2d, 0x97, 0xc2, 0x32, 0x7a, 0x1c, 0x67, 0xc8, 0xeb, 0xec, 0x9b, 0x9d, 0x64, 0x08, 0x71, 0x5e, + 0xeb, 0xbb, 0xfc, 0xc4, 0x94, 0xd7, 0xfd, 0x36, 0x75, 0x59, 0xbb, 0x6b, 0x12, 0xcb, 0x63, 0xbd, + 0xd3, 0xac, 0x57, 0xa1, 0xe0, 0x55, 0xa8, 0x70, 0x4d, 0xeb, 0x9d, 0x8e, 0x72, 0x22, 0xf1, 0xe5, + 0x69, 0x61, 0x79, 0xf8, 0x1f, 0x34, 0x98, 0x53, 0x06, 0x4c, 0xe4, 0x98, 0x77, 0x20, 0xcb, 0x71, + 0x4b, 0x51, 0xfc, 0x16, 0xc2, 0xa3, 0xb8, 0x1a, 0x5d, 0xf0, 0xa0, 0x55, 0xc8, 0xf1, 0x2f, 0x79, + 0x2c, 0x8c, 0x67, 0x97, 0x4c, 0xf8, 0x01, 0xcc, 0x0b, 0x12, 0xe9, 0xd9, 0x71, 0x7b, 0x9b, 0x39, + 0x14, 0x7f, 0x09, 0x0b, 0x61, 0xb6, 0x89, 0xa6, 0xa4, 0x18, 0x99, 0xba, 0x8a, 0x91, 0xeb, 0xd2, + 0xc8, 0xd7, 0xfd, 0x8e, 0x52, 0xab, 0xa3, 0xab, 0xae, 0xae, 0x48, 0x2a, 0xb2, 0x22, 0xfe, 0x04, + 0xa4, 0x88, 0x5f, 0x74, 0x02, 0xf3, 0x72, 0x3b, 0xec, 0x9a, 0xae, 0x7f, 0x82, 0xfb, 0x02, 0x90, + 0x4a, 0xfc, 0xa5, 0x0d, 0xda, 0x24, 0x6f, 0x1c, 0xe3, 0xa4, 0x47, 0xfc, 0xfa, 0x44, 0xcf, 0xf3, + 0x2a, 0x71, 0xa2, 0x8c, 0x5e, 0x87, 0xb9, 0x57, 0xf6, 0x90, 0xa6, 0x06, 0x4a, 0x0d, 0x42, 0x86, + 0xdf, 0xe7, 0xfc, 0x65, 0xf3, 0xdb, 0x54, 0xb9, 0x3a, 0x60, 0x22, 0xe5, 0xff, 0xa6, 0x41, 0x71, + 0xbd, 0x6b, 0x38, 0x3d, 0xa9, 0xf8, 0x7d, 0xc8, 0xf2, 0x5b, 0x8a, 0x00, 0x06, 0x1e, 0x86, 0xc5, + 0xa8, 0xbc, 0xbc, 0xb1, 0xce, 0xef, 0x34, 0x62, 0x14, 0x35, 0x5c, 0xbc, 0x1d, 0x6c, 0x46, 0xde, + 0x12, 0x36, 0xd1, 0x33, 0xc8, 0x18, 0x74, 0x08, 0x4b, 0xc1, 0xe5, 0xe8, 0xfd, 0x90, 0x49, 0x63, + 0x87, 0x33, 0xce, 0x85, 0xdf, 0x83, 0x82, 0xa2, 0x81, 0xde, 0x80, 0x5f, 0x36, 0xc4, 0x01, 0x6c, + 0x7d, 0xa3, 0xb9, 0x7d, 0xc4, 0x2f, 0xc6, 0x65, 0x80, 0xcd, 0x86, 0xdf, 0x4e, 0xe1, 0x4f, 0xc5, + 0x28, 0x91, 0xef, 0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x57, 0xb2, 0xe7, 0x1c, 0x4a, 0x62, 0xfa, + 0x93, 0xa6, 0x6f, 0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0xcf, 0x42, 0x49, 0x24, + 0x74, 0xb1, 0xff, 0xfe, 0x55, 0x83, 0xb2, 0xa4, 0x4c, 0x0a, 0x60, 0x4a, 0xec, 0x85, 0x57, 0x00, + 0x1f, 0x79, 0xb9, 0x0e, 0xd9, 0xce, 0xf1, 0xa1, 0xf9, 0x85, 0x04, 0x9b, 0x45, 0x8b, 0xd2, 0xbb, + 0x5c, 0x0f, 0x7f, 0xf1, 0x11, 0x2d, 0x7a, 0x0b, 0x77, 0x8c, 0x37, 0xde, 0xb6, 0xd5, 0x21, 0xe7, + 0xec, 0xdc, 0x38, 0xad, 0x07, 0x04, 0x76, 0x29, 0x15, 0x2f, 0x43, 0xec, 0xb0, 0xa8, 0xbe, 0x14, + 0xcd, 0xc3, 0xdc, 0xfa, 0xc0, 0x3b, 0x6d, 0x58, 0xc6, 0x71, 0x57, 0x66, 0x2c, 0x5a, 0x66, 0x29, + 0x71, 0xd3, 0x74, 0x55, 0x6a, 0x03, 0xe6, 0x29, 0x95, 0x58, 0x9e, 0xd9, 0x56, 0xd2, 0x9b, 0x2c, + 0x62, 0x5a, 0xa4, 0x88, 0x19, 0xae, 0xfb, 0xd6, 0x76, 0x3a, 0x62, 0x6a, 0x7e, 0x1b, 0x6f, 0x72, + 0xe1, 0xaf, 0xdd, 0x50, 0x99, 0xfa, 0xa1, 0x52, 0x56, 0x02, 0x29, 0x2f, 0x89, 0x37, 0x46, 0x0a, + 0x7e, 0x0a, 0xd7, 0x24, 0xa7, 0x00, 0xf7, 0xc6, 0x30, 0xef, 0xc3, 0x6d, 0xc9, 
0xbc, 0x71, 0x4a, + 0x6f, 0x4f, 0x07, 0x42, 0xe1, 0x8f, 0xb5, 0xf3, 0x05, 0x54, 0x7d, 0x3b, 0xd9, 0x61, 0xd9, 0xee, + 0xaa, 0x06, 0x0c, 0x5c, 0xb1, 0x67, 0xf2, 0x3a, 0xfb, 0xa6, 0x34, 0xc7, 0xee, 0xfa, 0x47, 0x02, + 0xfa, 0x8d, 0x37, 0x60, 0x51, 0xca, 0x10, 0xc7, 0xd8, 0xb0, 0x90, 0x11, 0x83, 0xe2, 0x84, 0x08, + 0x87, 0xd1, 0xa1, 0xe3, 0xdd, 0xae, 0x72, 0x86, 0x5d, 0xcb, 0x64, 0x6a, 0x8a, 0xcc, 0x6b, 0x7c, + 0x47, 0x50, 0xc3, 0xd4, 0x8a, 0x21, 0xc8, 0x54, 0x80, 0x4a, 0x16, 0x0b, 0x41, 0xc9, 0x23, 0x0b, + 0x31, 0x22, 0xfa, 0x33, 0x58, 0xf2, 0x8d, 0xa0, 0x7e, 0x3b, 0x20, 0x4e, 0xcf, 0x74, 0x5d, 0x05, + 0x0e, 0x8a, 0x9b, 0xf8, 0x43, 0x98, 0xee, 0x13, 0x91, 0x53, 0x0a, 0x6b, 0x68, 0x95, 0xbf, 0xdf, + 0xae, 0x2a, 0x83, 0x59, 0x3f, 0xee, 0xc0, 0x1d, 0x29, 0x9d, 0x7b, 0x34, 0x56, 0x7c, 0xd4, 0x28, + 0x79, 0xeb, 0xe6, 0x6e, 0x1d, 0xbd, 0x75, 0xa7, 0xf9, 0xda, 0xfb, 0x10, 0xe5, 0x47, 0xdc, 0x91, + 0x32, 0xb6, 0x26, 0xaa, 0x15, 0x3b, 0xdc, 0xa7, 0x7e, 0x48, 0x4e, 0x24, 0xec, 0x18, 0x16, 0xc2, + 0x91, 0x3c, 0x51, 0x1a, 0x5b, 0x80, 0x8c, 0x67, 0x9f, 0x11, 0x99, 0xc4, 0x78, 0x43, 0x1a, 0xec, + 0x87, 0xf9, 0x44, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0x27, 0xb5, 0x97, 0xae, 0xa6, 0x3c, 0x7c, + 0xf1, 0x06, 0xde, 0x83, 0xeb, 0xd1, 0x34, 0x31, 0x91, 0xc9, 0x47, 0x7c, 0x03, 0xc7, 0x65, 0x92, + 0x89, 0xe4, 0x7e, 0x1c, 0x24, 0x03, 0x25, 0xa1, 0x4c, 0x24, 0x52, 0x87, 0x5a, 0x5c, 0x7e, 0xf9, + 0x29, 0xf6, 0xab, 0x9f, 0x6e, 0x26, 0x12, 0xe6, 0x06, 0xc2, 0x26, 0x5f, 0xfe, 0x20, 0x47, 0xa4, + 0xc7, 0xe6, 0x08, 0x11, 0x24, 0x41, 0x16, 0xfb, 0x19, 0x36, 0x9d, 0xd0, 0x11, 0x24, 0xd0, 0x49, + 0x75, 0xd0, 0x1a, 0xe2, 0xeb, 0x60, 0x0d, 0xb9, 0xb1, 0xd5, 0xb4, 0x3b, 0xd1, 0x62, 0x7c, 0x12, + 0xe4, 0xce, 0x91, 0xcc, 0x3c, 0x91, 0xe0, 0x4f, 0x61, 0x39, 0x39, 0x29, 0x4f, 0x22, 0xf9, 0x49, + 0x1d, 0xf2, 0xfe, 0x81, 0x52, 0xf9, 0xed, 0x43, 0x01, 0x72, 0x7b, 0xfb, 0x87, 0x07, 0xeb, 0x1b, + 0x0d, 0xfe, 0xe3, 0x87, 0x8d, 0x7d, 0x5d, 0x7f, 0x7d, 0xd0, 0xac, 0xa4, 0xd6, 0xfe, 0x2f, 0x0d, + 0xa9, 0x9d, 0x23, 0xf4, 0xfb, 0x90, 0xe1, 0x2f, 0x81, 0x63, 0x9e, 0x7f, 0x6b, 0xe3, 0x1e, 0x3b, + 0xf1, 0xad, 0xaf, 0xfe, 0xe3, 0xbf, 0xbf, 0x4d, 0x5d, 0xc7, 0x73, 0xf5, 0xe1, 0xbb, 0x46, 0xb7, + 0x7f, 0x6a, 0xd4, 0xcf, 0x86, 0x75, 0x56, 0x20, 0x9e, 0x6b, 0x4f, 0xd0, 0x11, 0xa4, 0x0f, 0x06, + 0x1e, 0x4a, 0x7c, 0x1b, 0xae, 0x25, 0x3f, 0x82, 0xe2, 0x1a, 0x93, 0xbc, 0x80, 0x67, 0x55, 0xc9, + 0xfd, 0x81, 0x47, 0xe5, 0x0e, 0xa1, 0xa0, 0xbe, 0x63, 0x5e, 0xfa, 0x6a, 0x5c, 0xbb, 0xfc, 0x8d, + 0x14, 0x63, 0xa6, 0xef, 0x16, 0xbe, 0xa1, 0xea, 0xe3, 0xcf, 0xad, 0xea, 0x7c, 0x9a, 0xe7, 0x16, + 0x4a, 0x7c, 0x58, 0xae, 0x25, 0xbf, 0x9d, 0xc6, 0xcf, 0xc7, 0x3b, 0xb7, 0xa8, 0x5c, 0x5b, 0xbc, + 0x9d, 0xb6, 0x3d, 0x74, 0x27, 0xe6, 0xed, 0x4c, 0x7d, 0x25, 0xaa, 0x2d, 0x27, 0x33, 0x08, 0x4d, + 0x77, 0x99, 0xa6, 0x9b, 0xf8, 0xba, 0xaa, 0xa9, 0xed, 0xf3, 0x3d, 0xd7, 0x9e, 0xac, 0x9d, 0x42, + 0x86, 0xc1, 0xc4, 0xa8, 0x25, 0x3f, 0x6a, 0x31, 0x00, 0x77, 0xc2, 0x0e, 0x08, 0x01, 0xcc, 0x78, + 0x91, 0x69, 0x9b, 0xc7, 0x65, 0x5f, 0x1b, 0x43, 0x8a, 0x9f, 0x6b, 0x4f, 0x56, 0xb4, 0x5f, 0x69, + 0x6b, 0xff, 0x3b, 0x0d, 0x19, 0x06, 0x1a, 0xa1, 0x3e, 0x40, 0x80, 0xa9, 0x46, 0xe7, 0x39, 0x82, + 0xd2, 0x46, 0xe7, 0x39, 0x0a, 0xc7, 0xe2, 0x3b, 0x4c, 0xf3, 0x22, 0x5e, 0xf0, 0x35, 0x33, 0x40, + 0xaa, 0xce, 0x30, 0x36, 0xea, 0xd6, 0xb7, 0x02, 0x37, 0xe3, 0xd1, 0x86, 0xe2, 0x24, 0x86, 0xc0, + 0xd5, 0xe8, 0x36, 0x89, 0x01, 0x56, 0xf1, 0x3d, 0xa6, 0xf4, 0x36, 0xae, 0xaa, 0xce, 0xe5, 0x7a, + 0x1d, 0xc6, 0x49, 0x15, 0x7f, 0xad, 0x41, 0x39, 0x8c, 0x8f, 0xa2, 0x7b, 0x31, 0xa2, 0xa3, 0x30, + 0x6b, 
0xed, 0xfe, 0x78, 0xa6, 0x44, 0x13, 0xb8, 0xfe, 0x33, 0x42, 0xfa, 0x06, 0xe5, 0x14, 0xbe, + 0x47, 0x7f, 0xa2, 0xc1, 0x6c, 0x04, 0xf5, 0x44, 0x71, 0x2a, 0x46, 0x30, 0xd5, 0xda, 0x83, 0x4b, + 0xb8, 0x84, 0x25, 0x8f, 0x98, 0x25, 0x77, 0xf1, 0xad, 0x51, 0x67, 0x78, 0x66, 0x8f, 0x78, 0xb6, + 0xb0, 0xc6, 0x5f, 0x09, 0x0e, 0x51, 0xc6, 0xae, 0x44, 0x08, 0xf2, 0x8c, 0x5d, 0x89, 0x30, 0xbe, + 0x39, 0x6e, 0x25, 0x38, 0x30, 0x49, 0x37, 0xfa, 0xff, 0xa7, 0x21, 0xb7, 0xc1, 0x7f, 0x8c, 0x88, + 0x3c, 0xc8, 0xfb, 0x60, 0x20, 0x5a, 0x8a, 0x03, 0x66, 0x82, 0x8b, 0x43, 0xed, 0x4e, 0x62, 0xbf, + 0x50, 0xff, 0x90, 0xa9, 0x5f, 0xc6, 0x37, 0x7d, 0xf5, 0xe2, 0x47, 0x8f, 0x75, 0x0e, 0x01, 0xd4, + 0x8d, 0x4e, 0x87, 0x4e, 0xfd, 0x8f, 0x35, 0x28, 0xaa, 0x98, 0x1d, 0xba, 0x1b, 0x0b, 0x09, 0xa9, + 0xb0, 0x5f, 0x0d, 0x8f, 0x63, 0x11, 0xfa, 0x1f, 0x33, 0xfd, 0xf7, 0xf0, 0x52, 0x92, 0x7e, 0x87, + 0xf1, 0x87, 0x4d, 0xe0, 0xa8, 0x5b, 0xbc, 0x09, 0x21, 0x50, 0x2f, 0xde, 0x84, 0x30, 0x68, 0x77, + 0xb9, 0x09, 0x03, 0xc6, 0x4f, 0x4d, 0x38, 0x07, 0x08, 0x40, 0x36, 0x14, 0xeb, 0x5c, 0xe5, 0x2a, + 0x15, 0x0d, 0xfe, 0x51, 0x7c, 0x2e, 0x66, 0xeb, 0x45, 0x74, 0x77, 0x4d, 0x97, 0x26, 0x81, 0xb5, + 0x7f, 0xcc, 0x42, 0xe1, 0x95, 0x61, 0x5a, 0x1e, 0xb1, 0x0c, 0xab, 0x4d, 0xd0, 0x09, 0x64, 0x58, + 0xad, 0x8c, 0x66, 0x3c, 0x15, 0x7c, 0x8a, 0x66, 0xbc, 0x10, 0x32, 0x83, 0x1f, 0x30, 0xd5, 0x77, + 0x70, 0xcd, 0x57, 0xdd, 0x0b, 0xe4, 0xd7, 0x19, 0xaa, 0x42, 0xa7, 0x7c, 0x06, 0x59, 0x01, 0xd8, + 0x47, 0xa4, 0x85, 0xd0, 0x96, 0xda, 0xad, 0xf8, 0xce, 0xc4, 0x5d, 0xa6, 0xea, 0x72, 0x19, 0x33, + 0x55, 0xf6, 0x07, 0x00, 0x01, 0x66, 0x18, 0xf5, 0xef, 0x08, 0xc4, 0x58, 0x5b, 0x4e, 0x66, 0x10, + 0x8a, 0x9f, 0x30, 0xc5, 0xf7, 0xf1, 0x9d, 0x58, 0xc5, 0x1d, 0x7f, 0x00, 0x55, 0xde, 0x86, 0xe9, + 0x2d, 0xc3, 0x3d, 0x45, 0x91, 0xea, 0xa7, 0xfc, 0x90, 0xa0, 0x56, 0x8b, 0xeb, 0x12, 0xaa, 0xee, + 0x33, 0x55, 0x4b, 0x78, 0x31, 0x56, 0xd5, 0xa9, 0xe1, 0xd2, 0x62, 0x82, 0x4c, 0xc8, 0xf2, 0x1f, + 0x17, 0x44, 0xdd, 0x19, 0xfa, 0x81, 0x42, 0xd4, 0x9d, 0xe1, 0xdf, 0x23, 0x5c, 0x51, 0xd5, 0x00, + 0x66, 0xe4, 0x93, 0x3e, 0xba, 0x1d, 0x59, 0x9e, 0xf0, 0xf3, 0x7f, 0x6d, 0x29, 0xa9, 0x5b, 0x28, + 0x5c, 0x61, 0x0a, 0x31, 0xbe, 0x1d, 0xbf, 0x7e, 0x82, 0xfd, 0xb9, 0xf6, 0xe4, 0x57, 0x1a, 0xad, + 0x1a, 0x10, 0x60, 0xaf, 0x23, 0x41, 0x12, 0x85, 0x71, 0x47, 0x82, 0x64, 0x04, 0xb6, 0xc5, 0xef, + 0x32, 0xed, 0xcf, 0xf0, 0x4a, 0xac, 0x76, 0xcf, 0x31, 0x2c, 0xf7, 0x0d, 0x71, 0x9e, 0x71, 0x90, + 0xcd, 0x3d, 0x35, 0xfb, 0x34, 0x60, 0xfe, 0xac, 0x02, 0xd3, 0xf4, 0x9c, 0x4a, 0x0b, 0x76, 0x70, + 0xbd, 0x8f, 0x9a, 0x33, 0x02, 0xaa, 0x45, 0xcd, 0x19, 0x45, 0x06, 0x62, 0x0a, 0x36, 0xfb, 0x11, + 0x3a, 0x61, 0x5c, 0xd4, 0xf1, 0x1e, 0x14, 0x14, 0x10, 0x00, 0xc5, 0x48, 0x0c, 0x43, 0x76, 0xd1, + 0x32, 0x11, 0x83, 0x20, 0xe0, 0x65, 0xa6, 0xb4, 0x86, 0xaf, 0x85, 0x95, 0x76, 0x38, 0x1b, 0xd5, + 0xfa, 0x25, 0x14, 0x55, 0xb4, 0x00, 0xc5, 0x08, 0x8d, 0x60, 0x82, 0xd1, 0xec, 0x18, 0x07, 0x36, + 0xc4, 0xa4, 0x09, 0xff, 0x27, 0xf7, 0x92, 0x97, 0x6a, 0xff, 0x1c, 0x72, 0x02, 0x43, 0x88, 0x9b, + 0x6f, 0x18, 0x45, 0x8c, 0x9b, 0x6f, 0x04, 0x80, 0x88, 0x39, 0xfd, 0x31, 0xb5, 0xf4, 0xae, 0x24, + 0x4b, 0x92, 0x50, 0xf9, 0x92, 0x78, 0x49, 0x2a, 0x03, 0x5c, 0x2c, 0x49, 0xa5, 0x72, 0x4f, 0x1d, + 0xab, 0xf2, 0x84, 0x78, 0x22, 0xa4, 0xe4, 0x25, 0x10, 0x25, 0x48, 0x54, 0xf3, 0x3f, 0x1e, 0xc7, + 0x92, 0x78, 0x60, 0x0f, 0xb4, 0x8a, 0xe4, 0x8f, 0xfe, 0x10, 0x20, 0x00, 0x3c, 0xa2, 0x67, 0xb0, + 0x58, 0xd4, 0x34, 0x7a, 0x06, 0x8b, 0xc7, 0x4c, 0x62, 0x12, 0x49, 0xa0, 0x9c, 0x5f, 0x1a, 0xa8, + 0xfa, 0xbf, 0xd4, 0x00, 0x8d, 
0x02, 0x24, 0xe8, 0x69, 0xbc, 0x8a, 0x58, 0x40, 0xb6, 0xf6, 0xce, + 0xd5, 0x98, 0x13, 0xeb, 0x45, 0x60, 0x57, 0x9b, 0x0d, 0xe9, 0xbf, 0xa5, 0x96, 0x7d, 0xa3, 0x41, + 0x29, 0x04, 0xb1, 0xa0, 0x87, 0x09, 0xeb, 0x1c, 0x01, 0x75, 0x6b, 0x8f, 0x2e, 0xe5, 0x4b, 0x3c, + 0x9f, 0x29, 0xbb, 0x42, 0x1e, 0xd1, 0xff, 0x54, 0x83, 0x72, 0x18, 0x97, 0x41, 0x09, 0x0a, 0x46, + 0x90, 0xe1, 0xda, 0xca, 0xe5, 0x8c, 0x57, 0x58, 0xad, 0xe0, 0xd4, 0xfe, 0x39, 0xe4, 0x04, 0x9c, + 0x13, 0x17, 0x16, 0x61, 0x60, 0x39, 0x2e, 0x2c, 0x22, 0x58, 0x50, 0x52, 0x58, 0x38, 0x76, 0x97, + 0x28, 0x91, 0x28, 0x40, 0x9f, 0x24, 0x95, 0xe3, 0x23, 0x31, 0x82, 0x18, 0x8d, 0x55, 0x19, 0x44, + 0xa2, 0x84, 0x7c, 0x50, 0x82, 0xc4, 0x4b, 0x22, 0x31, 0x8a, 0x18, 0x25, 0x45, 0x22, 0xd3, 0xaa, + 0x44, 0x62, 0x80, 0xd0, 0xc4, 0x45, 0xe2, 0x08, 0x6c, 0x1e, 0x17, 0x89, 0xa3, 0x20, 0x4f, 0xd2, + 0xda, 0x32, 0xe5, 0xa1, 0x48, 0x9c, 0x8f, 0x41, 0x74, 0xd0, 0x3b, 0x09, 0x3e, 0x8d, 0x85, 0xe4, + 0x6b, 0xcf, 0xae, 0xc8, 0x3d, 0x3e, 0x02, 0xf8, 0x6a, 0xc8, 0x08, 0xf8, 0x5b, 0x0d, 0x16, 0xe2, + 0x20, 0x21, 0x94, 0xa0, 0x2c, 0x01, 0xcf, 0xaf, 0xad, 0x5e, 0x95, 0xfd, 0x0a, 0x7e, 0xf3, 0x63, + 0xe2, 0x45, 0xe5, 0x5f, 0xbe, 0x5f, 0xd2, 0xfe, 0xfd, 0xfb, 0x25, 0xed, 0x3f, 0xbf, 0x5f, 0xd2, + 0xfe, 0xea, 0xbf, 0x96, 0xa6, 0x8e, 0xb3, 0xec, 0x7f, 0x82, 0xbd, 0xfb, 0x9b, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xdd, 0x84, 0xb6, 0xd7, 0x90, 0x36, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto new file mode 100644 index 00000000000..2c18ce101c6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -0,0 +1,1053 @@ +syntax = "proto3"; +package etcdserverpb; + +import "gogoproto/gogo.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; +import "etcd/auth/authpb/auth.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service KV { + // Range gets the keys in the range from the key-value store. + rpc Range(RangeRequest) returns (RangeResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/range" + body: "*" + }; + } + + // Put puts the given key into the key-value store. + // A put request increments the revision of the key-value store + // and generates one event in the event history. + rpc Put(PutRequest) returns (PutResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/put" + body: "*" + }; + } + + // DeleteRange deletes the given range from the key-value store. + // A delete request increments the revision of the key-value store + // and generates a delete event in the event history for every deleted key. + rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/deleterange" + body: "*" + }; + } + + // Txn processes multiple requests in a single transaction. + // A txn request increments the revision of the key-value store + // and generates events with the same revision for every completed request. + // It is not allowed to modify the same key several times within one txn. + rpc Txn(TxnRequest) returns (TxnResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/txn" + body: "*" + }; + } + + // Compact compacts the event history in the etcd key-value store. The key-value + // store should be periodically compacted or the event history will continue to grow + // indefinitely. 
+ rpc Compact(CompactionRequest) returns (CompactionResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/compaction" + body: "*" + }; + } +} + +service Watch { + // Watch watches for events happening or that have happened. Both input and output + // are streams; the input stream is for creating and canceling watchers and the output + // stream sends events. One watch RPC can watch on multiple key ranges, streaming events + // for several watches at once. The entire event history can be watched starting from the + // last compaction revision. + rpc Watch(stream WatchRequest) returns (stream WatchResponse) { + option (google.api.http) = { + post: "/v3alpha/watch" + body: "*" + }; + } +} + +service Lease { + // LeaseGrant creates a lease which expires if the server does not receive a keepAlive + // within a given time to live period. All keys attached to the lease will be expired and + // deleted if the lease expires. Each expired key generates a delete event in the event history. + rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { + option (google.api.http) = { + post: "/v3alpha/lease/grant" + body: "*" + }; + } + + // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. + rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/lease/revoke" + body: "*" + }; + } + + // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client + // to the server and streaming keep alive responses from the server to the client. + rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { + option (google.api.http) = { + post: "/v3alpha/lease/keepalive" + body: "*" + }; + } + + // LeaseTimeToLive retrieves lease information. + rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/lease/timetolive" + body: "*" + }; + } + + // LeaseLeases lists all existing leases. + rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { + option (google.api.http) = { + post: "/v3alpha/kv/lease/leases" + body: "*" + }; + } +} + +service Cluster { + // MemberAdd adds a member into the cluster. + rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/add" + body: "*" + }; + } + + // MemberRemove removes an existing member from the cluster. + rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/remove" + body: "*" + }; + } + + // MemberUpdate updates the member configuration. + rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/update" + body: "*" + }; + } + + // MemberList lists all the members in the cluster. + rpc MemberList(MemberListRequest) returns (MemberListResponse) { + option (google.api.http) = { + post: "/v3alpha/cluster/member/list" + body: "*" + }; + } +} + +service Maintenance { + // Alarm activates, deactivates, and queries alarms regarding cluster health. + rpc Alarm(AlarmRequest) returns (AlarmResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/alarm" + body: "*" + }; + } + + // Status gets the status of the member. 
+  rpc Status(StatusRequest) returns (StatusResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/maintenance/status"
+      body: "*"
+    };
+  }
+
+  // Defragment defragments a member's backend database to recover storage space.
+  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/maintenance/defragment"
+      body: "*"
+    };
+  }
+
+  // Hash computes the hash of the KV's backend.
+  // This is designed for testing; do not use this in production when there
+  // are ongoing transactions.
+  rpc Hash(HashRequest) returns (HashResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/maintenance/hash"
+      body: "*"
+    };
+  }
+
+  // HashKV computes the hash of all MVCC keys up to a given revision.
+  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/maintenance/hash"
+      body: "*"
+    };
+  }
+
+  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/maintenance/snapshot"
+      body: "*"
+    };
+  }
+
+  // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/maintenance/transfer-leadership"
+      body: "*"
+    };
+  }
+}
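The service definitions in this file are what protoc-gen-gogo and grpc-go compile into the typed stubs (NewKVClient, NewWatchClient, NewLeaseClient, and so on) found in the generated rpc.pb.go above. As a minimal sketch of how those stubs are consumed, assuming an etcd member reachable without TLS on localhost:2379 (an assumption of this example, not something this diff configures):

package main

import (
	"context"
	"fmt"
	"log"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

func main() {
	// Dial a single member; a production client would add TLS and balancing.
	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewKVClient is generated from `service KV` earlier in this file.
	kv := pb.NewKVClient(conn)
	resp, err := kv.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		log.Fatal(err)
	}
	for _, kvp := range resp.Kvs {
		fmt.Printf("%s=%s (mod_revision=%d)\n", kvp.Key, kvp.Value, kvp.ModRevision)
	}
}

Most applications would reach for the higher-level clientv3 package rather than these raw stubs; the sketch only illustrates the wire-level API that the definitions above and below describe.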
+
+service Auth {
+  // AuthEnable enables authentication.
+  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/enable"
+      body: "*"
+    };
+  }
+
+  // AuthDisable disables authentication.
+  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/disable"
+      body: "*"
+    };
+  }
+
+  // Authenticate processes an authenticate request.
+  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/authenticate"
+      body: "*"
+    };
+  }
+
+  // UserAdd adds a new user.
+  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/add"
+      body: "*"
+    };
+  }
+
+  // UserGet gets detailed user information.
+  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/get"
+      body: "*"
+    };
+  }
+
+  // UserList gets a list of all users.
+  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/list"
+      body: "*"
+    };
+  }
+
+  // UserDelete deletes a specified user.
+  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/delete"
+      body: "*"
+    };
+  }
+
+  // UserChangePassword changes the password of a specified user.
+  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/changepw"
+      body: "*"
+    };
+  }
+
+  // UserGrantRole grants a role to a specified user.
+  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/grant"
+      body: "*"
+    };
+  }
+
+  // UserRevokeRole revokes a role of a specified user.
+  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/user/revoke"
+      body: "*"
+    };
+  }
+
+  // RoleAdd adds a new role.
+  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/role/add"
+      body: "*"
+    };
+  }
+
+  // RoleGet gets detailed role information.
+  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/role/get"
+      body: "*"
+    };
+  }
+
+  // RoleList gets a list of all roles.
+  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/role/list"
+      body: "*"
+    };
+  }
+
+  // RoleDelete deletes a specified role.
+  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/role/delete"
+      body: "*"
+    };
+  }
+
+  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/role/grant"
+      body: "*"
+    };
+  }
+
+  // RoleRevokePermission revokes a key or range permission of a specified role.
+  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+    option (google.api.http) = {
+      post: "/v3alpha/auth/role/revoke"
+      body: "*"
+    };
+  }
+}
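Authenticate above returns a token that every subsequent RPC must present as gRPC metadata once auth is enabled. A sketch of that handshake, reusing conn and kv from the previous example and assuming a root user already exists; the "token" metadata key matches what the etcd server of this vintage reads, but treat it as an assumption of the sketch (metadata is google.golang.org/grpc/metadata):

// Log in and obtain a token for subsequent requests.
auth := pb.NewAuthClient(conn)
ar, err := auth.Authenticate(context.Background(),
	&pb.AuthenticateRequest{Name: "root", Password: "secret"})
if err != nil {
	log.Fatal(err)
}

// Attach the token to the following RPCs via outgoing metadata.
ctx := metadata.NewOutgoingContext(context.Background(),
	metadata.Pairs("token", ar.Token))
if _, err := kv.Put(ctx, &pb.PutRequest{Key: []byte("k"), Value: []byte("v")}); err != nil {
	log.Fatal(err)
}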
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  // member_id is the ID of the member which sent the response.
+  uint64 member_id = 2;
+  // revision is the key-value store revision when the request was applied.
+  int64 revision = 3;
+  // raft_term is the raft term when the request was applied.
+  uint64 raft_term = 4;
+}
+
+message RangeRequest {
+  enum SortOrder {
+    NONE = 0; // default, no sorting
+    ASCEND = 1; // lowest target value first
+    DESCEND = 2; // highest target value first
+  }
+  enum SortTarget {
+    KEY = 0;
+    VERSION = 1;
+    CREATE = 2;
+    MOD = 3;
+    VALUE = 4;
+  }
+
+  // key is the first key for the range. If range_end is not given, the request only looks up key.
+  bytes key = 1;
+  // range_end is the upper bound on the requested range [key, range_end).
+  // If range_end is '\0', the range is all keys >= key.
+  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then the range request gets all keys prefixed with key.
+  // If both key and range_end are '\0', then the range request returns all keys.
+  bytes range_end = 2;
+  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+  // it is treated as no limit.
+  int64 limit = 3;
+  // revision is the point-in-time of the key-value store to use for the range.
+  // If revision is less than or equal to zero, the range is over the newest key-value store.
+  // If the revision has been compacted, ErrCompacted is returned as a response.
+  int64 revision = 4;
+
+  // sort_order is the order for returned sorted results.
+  SortOrder sort_order = 5;
+
+  // sort_target is the key-value field to use for sorting.
+  SortTarget sort_target = 6;
+
+  // serializable sets the range request to use serializable member-local reads.
+  // Range requests are linearizable by default; linearizable requests have higher
+  // latency and lower throughput than serializable requests but reflect the current
+  // consensus of the cluster. For better performance, in exchange for possible stale reads,
+  // a serializable range request is served locally without needing to reach consensus
+  // with other nodes in the cluster.
+  bool serializable = 7;
+
+  // keys_only when set returns only the keys and not the values.
+  bool keys_only = 8;
+
+  // count_only when set returns only the count of the keys in the range.
+  bool count_only = 9;
+
+  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+  // lesser mod revisions will be filtered away.
+  int64 min_mod_revision = 10;
+
+  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+  // greater mod revisions will be filtered away.
+  int64 max_mod_revision = 11;
+
+  // min_create_revision is the lower bound for returned key create revisions; all keys with
+  // lesser create revisions will be filtered away.
+  int64 min_create_revision = 12;
+
+  // max_create_revision is the upper bound for returned key create revisions; all keys with
+  // greater create revisions will be filtered away.
+  int64 max_create_revision = 13;
+}
+
+message RangeResponse {
+  ResponseHeader header = 1;
+  // kvs is the list of key-value pairs matched by the range request.
+  // kvs is empty when count is requested.
+  repeated mvccpb.KeyValue kvs = 2;
+  // more indicates if there are more keys to return in the requested range.
+  bool more = 3;
+  // count is set to the number of keys within the range when requested.
+  int64 count = 4;
+}
+
+message PutRequest {
+  // key is the key, in bytes, to put into the key-value store.
+  bytes key = 1;
+  // value is the value, in bytes, to associate with the key in the key-value store.
+  bytes value = 2;
+  // lease is the lease ID to associate with the key in the key-value store. A lease
+  // value of 0 indicates no lease.
+  int64 lease = 3;
+
+  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+  // The previous key-value pair will be returned in the put response.
+  bool prev_kv = 4;
+
+  // If ignore_value is set, etcd updates the key using its current value.
+  // Returns an error if the key does not exist.
+  bool ignore_value = 5;
+
+  // If ignore_lease is set, etcd updates the key using its current lease.
+  // Returns an error if the key does not exist.
+  bool ignore_lease = 6;
+}
+
+message PutResponse {
+  ResponseHeader header = 1;
+  // if prev_kv is set in the request, the previous key-value pair will be returned.
+  mvccpb.KeyValue prev_kv = 2;
+}
+
+message DeleteRangeRequest {
+  // key is the first key to delete in the range.
+  bytes key = 1;
+  // range_end is the key following the last key to delete for the range [key, range_end).
+  // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is the given key plus one, then the range is all the keys
+  // with the prefix (the given key).
+  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+  bytes range_end = 2;
+
+  // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+  // The previous key-value pairs will be returned in the delete response.
+  bool prev_kv = 3;
+}
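RangeRequest and DeleteRangeRequest share the [key, range_end) convention, where "key plus one" turns a range into a prefix match. The computation is mechanical; this standalone helper is only illustrative (clientv3 ships an equivalent), and the fallback for an all-0xff prefix relies on the '\0' rule documented above:

// prefixEnd returns the range_end that makes [prefix, range_end) cover exactly
// the keys starting with prefix: increment the last byte that is not 0xff and
// drop everything after it ("aa" -> "ab", "a\xff" -> "b").
func prefixEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// The prefix is all 0xff bytes; there is no finite upper bound, so fall
	// back to "\x00", which means "all keys greater than or equal to key".
	return []byte{0}
}

A prefix delete is then &pb.DeleteRangeRequest{Key: k, RangeEnd: prefixEnd(k)}.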
+
+message DeleteRangeResponse {
+  ResponseHeader header = 1;
+  // deleted is the number of keys deleted by the delete range request.
+  int64 deleted = 2;
+  // if prev_kv is set in the request, the previous key-value pairs will be returned.
+  repeated mvccpb.KeyValue prev_kvs = 3;
+}
+
+message RequestOp {
+  // request is a union of request types accepted by a transaction.
+  oneof request {
+    RangeRequest request_range = 1;
+    PutRequest request_put = 2;
+    DeleteRangeRequest request_delete_range = 3;
+    TxnRequest request_txn = 4;
+  }
+}
+
+message ResponseOp {
+  // response is a union of response types returned by a transaction.
+  oneof response {
+    RangeResponse response_range = 1;
+    PutResponse response_put = 2;
+    DeleteRangeResponse response_delete_range = 3;
+    TxnResponse response_txn = 4;
+  }
+}
+
+message Compare {
+  enum CompareResult {
+    EQUAL = 0;
+    GREATER = 1;
+    LESS = 2;
+    NOT_EQUAL = 3;
+  }
+  enum CompareTarget {
+    VERSION = 0;
+    CREATE = 1;
+    MOD = 2;
+    VALUE = 3;
+    LEASE = 4;
+  }
+  // result is the logical comparison operation for this comparison.
+  CompareResult result = 1;
+  // target is the key-value field to inspect for the comparison.
+  CompareTarget target = 2;
+  // key is the subject key for the comparison operation.
+  bytes key = 3;
+  oneof target_union {
+    // version is the version of the given key
+    int64 version = 4;
+    // create_revision is the creation revision of the given key
+    int64 create_revision = 5;
+    // mod_revision is the last modified revision of the given key.
+    int64 mod_revision = 6;
+    // value is the value of the given key, in bytes.
+    bytes value = 7;
+    // lease is the lease id of the given key.
+    int64 lease = 8;
+    // leave room for more target_union field tags, jump to 64
+  }
+
+  // range_end compares the given target to all keys in the range [key, range_end).
+  // See RangeRequest for more details on key ranges.
+  bytes range_end = 64;
+  // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From the google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard
+// evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
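The TxnRequest message below encodes exactly that guard/t op/f op shape. A sketch of the compare-and-swap it enables, reusing the kv stub from the first example; readRev stands for a mod revision captured from an earlier Range response and is an assumption of the sketch:

// Overwrite "k" only if its mod_revision is still readRev; otherwise read it back.
txnResp, err := kv.Txn(context.Background(), &pb.TxnRequest{
	Compare: []*pb.Compare{{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_MOD,
		Key:         []byte("k"),
		TargetUnion: &pb.Compare_ModRevision{ModRevision: readRev},
	}},
	Success: []*pb.RequestOp{{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{Key: []byte("k"), Value: []byte("v2")},
		},
	}},
	Failure: []*pb.RequestOp{{
		Request: &pb.RequestOp_RequestRange{
			RequestRange: &pb.RangeRequest{Key: []byte("k")},
		},
	}},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("guard held:", txnResp.Succeeded)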
+
+message TxnRequest {
+  // compare is a list of predicates representing a conjunction of terms.
+  // If the comparisons succeed, then the success requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  // If the comparisons fail, then the failure requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  repeated Compare compare = 1;
+  // success is a list of requests which will be applied when compare evaluates to true.
+  repeated RequestOp success = 2;
+  // failure is a list of requests which will be applied when compare evaluates to false.
+  repeated RequestOp failure = 3;
+}
+
+message TxnResponse {
+  ResponseHeader header = 1;
+  // succeeded is set to true if the compare evaluated to true; false otherwise.
+  bool succeeded = 2;
+  // responses is a list of responses corresponding to the results from applying
+  // success if succeeded is true or failure if succeeded is false.
+  repeated ResponseOp responses = 3;
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+  // revision is the key-value store revision for the compaction operation.
+  int64 revision = 1;
+  // physical is set so the RPC will wait until the compaction is physically
+  // applied to the local database such that compacted entries are totally
+  // removed from the backend database.
+  bool physical = 2;
+}
+
+message CompactionResponse {
+  ResponseHeader header = 1;
+}
+
+message HashRequest {
+}
+
+message HashKVRequest {
+  // revision is the key-value store revision for the hash operation.
+  int64 revision = 1;
+}
+
+message HashKVResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+  uint32 hash = 2;
+  // compact_revision is the compacted revision of the key-value store when the hash begins.
+  int64 compact_revision = 3;
+}
+
+message HashResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's KV's backend.
+  uint32 hash = 2;
+}
+
+message SnapshotRequest {
+}
+
+message SnapshotResponse {
+  // header has the current key-value store information. The first header in the snapshot
+  // stream indicates the point in time of the snapshot.
+  ResponseHeader header = 1;
+
+  // remaining_bytes is the number of blob bytes to be sent after this message.
+  uint64 remaining_bytes = 2;
+
+  // blob contains the next chunk of the snapshot in the snapshot stream.
+  bytes blob = 3;
+}
+
+message WatchRequest {
+  // request_union is a request to either create a new watcher or cancel an existing watcher.
+  oneof request_union {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+  }
+}
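Watch is a single bidirectional stream multiplexing many watchers: the client sends create/cancel requests like the WatchCreateRequest defined next, and the server interleaves event batches tagged with watch IDs. A sketch, again assuming the conn from the first example:

// Open the stream and register one watcher on "k", starting from "now".
watcher := pb.NewWatchClient(conn)
stream, err := watcher.Watch(context.Background())
if err != nil {
	log.Fatal(err)
}
create := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
	CreateRequest: &pb.WatchCreateRequest{Key: []byte("k")},
}}
if err := stream.Send(create); err != nil {
	log.Fatal(err)
}
for {
	wresp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range wresp.Events {
		fmt.Printf("%s %s=%s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}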
+
+message WatchCreateRequest {
+  // key is the key to register for watching.
+  bytes key = 1;
+  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+  // or equal to the key argument are watched.
+  // If range_end is the given key plus one,
+  // then all keys with the prefix (the given key) will be watched.
+  bytes range_end = 2;
+  // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+  int64 start_revision = 3;
+  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+  // no events to the new watcher if there are no recent events. It is useful when clients
+  // wish to recover a disconnected watcher starting from a recent known revision.
+  // The etcd server may decide how often it will send notifications based on current load.
+  bool progress_notify = 4;
+
+  enum FilterType {
+    // filter out put event.
+    NOPUT = 0;
+    // filter out delete event.
+    NODELETE = 1;
+  }
+  // filters filter the events on the server side before they are sent back to the watcher.
+  repeated FilterType filters = 5;
+
+  // If prev_kv is set, the created watcher gets the previous KV before the event happens.
+  // If the previous KV is already compacted, nothing will be returned.
+  bool prev_kv = 6;
+}
+
+message WatchCancelRequest {
+  // watch_id is the watcher id to cancel so that no more events are transmitted.
+  int64 watch_id = 1;
+}
+
+message WatchResponse {
+  ResponseHeader header = 1;
+  // watch_id is the ID of the watcher that corresponds to the response.
+  int64 watch_id = 2;
+  // created is set to true if the response is for a create watch request.
+  // The client should record the watch_id and expect to receive events for
+  // the created watcher from the same stream.
+  // All events sent to the created watcher will attach with the same watch_id.
+  bool created = 3;
+  // canceled is set to true if the response is for a cancel watch request.
+  // No further events will be sent to the canceled watcher.
+  bool canceled = 4;
+  // compact_revision is set to the minimum index if a watcher tries to watch
+  // at a compacted index.
+  //
+  // This happens when creating a watcher at a compacted revision or the watcher cannot
+  // catch up with the progress of the key-value store.
+  //
+  // The client should treat the watcher as canceled and should not try to create any
+  // watcher with the same start_revision again.
+  int64 compact_revision = 5;
+
+  // cancel_reason indicates the reason for canceling the watcher.
+  string cancel_reason = 6;
+
+  repeated mvccpb.Event events = 11;
+}
+
+message LeaseGrantRequest {
+  // TTL is the advisory time-to-live in seconds.
+  int64 TTL = 1;
+  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+  int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID for the granted lease.
+  int64 ID = 2;
+  // TTL is the server chosen lease time-to-live in seconds.
+  int64 TTL = 3;
+  string error = 4;
+}
+
+message LeaseRevokeRequest {
+  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+  int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+  ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+  // ID is the lease ID for the lease to keep alive.
+  int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the new time-to-live for the lease.
+  int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+  // ID is the lease ID for the lease.
+  int64 ID = 1;
+  // keys is true to query all the keys attached to this lease.
+  bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the time-to-live request.
+  int64 ID = 2;
+  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+  int64 TTL = 3;
+  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+  int64 grantedTTL = 4;
+  // Keys is the list of keys attached to this lease.
+  repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+}
+
+message LeaseStatus {
+  int64 ID = 1;
+  // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+  ResponseHeader header = 1;
+  repeated LeaseStatus leases = 2;
+}
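A key carrying a lease lives only as long as the lease is refreshed. A sketch of the grant/attach/keep-alive cycle, under the same conn and kv assumptions as above; a real client would drive Send from a timer rather than refreshing once:

lease := pb.NewLeaseClient(conn)
grant, err := lease.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{TTL: 10})
if err != nil {
	log.Fatal(err)
}
// Attach a key to the lease; it is deleted when the lease expires.
_, err = kv.Put(context.Background(),
	&pb.PutRequest{Key: []byte("k"), Value: []byte("v"), Lease: grant.ID})
if err != nil {
	log.Fatal(err)
}
// LeaseKeepAlive is a stream; one refresh round-trip looks like this.
ka, err := lease.LeaseKeepAlive(context.Background())
if err != nil {
	log.Fatal(err)
}
if err := ka.Send(&pb.LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
	log.Fatal(err)
}
kresp, err := ka.Recv()
if err != nil {
	log.Fatal(err)
}
fmt.Println("lease refreshed, new TTL:", kresp.TTL)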
+
+message Member {
+  // ID is the member ID for this member.
+  uint64 ID = 1;
+  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+  string name = 2;
+  // peerURLs is the list of URLs the member exposes to the cluster for communication.
+  repeated string peerURLs = 3;
+  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+  repeated string clientURLs = 4;
+}
+
+message MemberAddRequest {
+  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+  repeated string peerURLs = 1;
+}
+
+message MemberAddResponse {
+  ResponseHeader header = 1;
+  // member is the member information for the added member.
+  Member member = 2;
+  // members is a list of all members after adding the new member.
+  repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+  // ID is the member ID of the member to remove.
+  uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after removing the member.
+  repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+  // ID is the member ID of the member to update.
+  uint64 ID = 1;
+  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+  repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after updating the member.
+  repeated Member members = 2;
+}
+
+message MemberListRequest {
+}
+
+message MemberListResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members associated with the cluster.
+  repeated Member members = 2;
+}
+
+message DefragmentRequest {
+}
+
+message DefragmentResponse {
+  ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+  // targetID is the node ID for the new leader.
+  uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+  ResponseHeader header = 1;
+}
+
+enum AlarmType {
+  NONE = 0; // default, used to query if any alarm is active
+  NOSPACE = 1; // space quota is exhausted
+  CORRUPT = 2; // kv store corruption detected
+}
+
+message AlarmRequest {
+  enum AlarmAction {
+    GET = 0;
+    ACTIVATE = 1;
+    DEACTIVATE = 2;
+  }
+  // action is the kind of alarm request to issue. The action
+  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+  // raised alarm.
+  AlarmAction action = 1;
+  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+  // alarm request covers all members.
+  uint64 memberID = 2;
+  // alarm is the type of alarm to consider for this request.
+  AlarmType alarm = 3;
+}
+
+message AlarmMember {
+  // memberID is the ID of the member associated with the raised alarm.
+  uint64 memberID = 1;
+  // alarm is the type of alarm which has been raised.
+  AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+  ResponseHeader header = 1;
+  // alarms is a list of alarms associated with the alarm request.
+  repeated AlarmMember alarms = 2;
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+  ResponseHeader header = 1;
+  // version is the cluster protocol version used by the responding member.
+  string version = 2;
+  // dbSize is the size of the backend database, in bytes, of the responding member.
+  int64 dbSize = 3;
+  // leader is the member ID which the responding member believes is the current leader.
+  uint64 leader = 4;
+  // raftIndex is the current raft index of the responding member.
+  uint64 raftIndex = 5;
+  // raftTerm is the current raft term of the responding member.
+ uint64 raftTerm = 6; +} + +message AuthEnableRequest { +} + +message AuthDisableRequest { +} + +message AuthenticateRequest { + string name = 1; + string password = 2; +} + +message AuthUserAddRequest { + string name = 1; + string password = 2; +} + +message AuthUserGetRequest { + string name = 1; +} + +message AuthUserDeleteRequest { + // name is the name of the user to delete. + string name = 1; +} + +message AuthUserChangePasswordRequest { + // name is the name of the user whose password is being changed. + string name = 1; + // password is the new password for the user. + string password = 2; +} + +message AuthUserGrantRoleRequest { + // user is the name of the user which should be granted a given role. + string user = 1; + // role is the name of the role to grant to the user. + string role = 2; +} + +message AuthUserRevokeRoleRequest { + string name = 1; + string role = 2; +} + +message AuthRoleAddRequest { + // name is the name of the role to add to the authentication system. + string name = 1; +} + +message AuthRoleGetRequest { + string role = 1; +} + +message AuthUserListRequest { +} + +message AuthRoleListRequest { +} + +message AuthRoleDeleteRequest { + string role = 1; +} + +message AuthRoleGrantPermissionRequest { + // name is the name of the role which will be granted the permission. + string name = 1; + // perm is the permission to grant to the role. + authpb.Permission perm = 2; +} + +message AuthRoleRevokePermissionRequest { + string role = 1; + string key = 2; + string range_end = 3; +} + +message AuthEnableResponse { + ResponseHeader header = 1; +} + +message AuthDisableResponse { + ResponseHeader header = 1; +} + +message AuthenticateResponse { + ResponseHeader header = 1; + // token is an authorized token that can be used in succeeding RPCs + string token = 2; +} + +message AuthUserAddResponse { + ResponseHeader header = 1; +} + +message AuthUserGetResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserDeleteResponse { + ResponseHeader header = 1; +} + +message AuthUserChangePasswordResponse { + ResponseHeader header = 1; +} + +message AuthUserGrantRoleResponse { + ResponseHeader header = 1; +} + +message AuthUserRevokeRoleResponse { + ResponseHeader header = 1; +} + +message AuthRoleAddResponse { + ResponseHeader header = 1; +} + +message AuthRoleGetResponse { + ResponseHeader header = 1; + + repeated authpb.Permission perm = 2; +} + +message AuthRoleListResponse { + ResponseHeader header = 1; + + repeated string roles = 2; +} + +message AuthUserListResponse { + ResponseHeader header = 1; + + repeated string users = 2; +} + +message AuthRoleDeleteResponse { + ResponseHeader header = 1; +} + +message AuthRoleGrantPermissionResponse { + ResponseHeader header = 1; +} + +message AuthRoleRevokePermissionResponse { + ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go new file mode 100644 index 00000000000..7033f132662 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -0,0 +1,735 @@ +// Code generated by protoc-gen-gogo. +// source: kv.proto +// DO NOT EDIT! + +/* + Package mvccpb is a generated protocol buffer package. + + It is generated from these files: + kv.proto + + It has these top-level messages: + KeyValue + Event +*/ +package mvccpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Event_EventType int32 + +const ( + PUT Event_EventType = 0 + DELETE Event_EventType = 1 +) + +var Event_EventType_name = map[int32]string{ + 0: "PUT", + 1: "DELETE", +} +var Event_EventType_value = map[string]int32{ + "PUT": 0, + "DELETE": 1, +} + +func (x Event_EventType) String() string { + return proto.EnumName(Event_EventType_name, int32(x)) +} +func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } + +type KeyValue struct { + // key is the key in bytes. An empty key is not allowed. + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // create_revision is the revision of last creation on this key. + CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` + // mod_revision is the revision of last modification on this key. + ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + // value is the value held by the key, in bytes. + Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } + +type Event struct { + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. 
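+	// For a DELETE event, for example, Kv carries the deleted key with its
+	// mod revision set to the deletion revision, while PrevKv (when
+	// populated) holds the last live key-value pair (editor's note).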
+ PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } + +func init() { + proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") + proto.RegisterType((*Event)(nil), "mvccpb.Event") + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) +} +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.CreateRevision != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) + } + if m.Version != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Version)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Type)) + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) + n1, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PrevKv != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintKv(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *KeyValue) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.CreateRevision != 0 { + n += 1 + sovKv(uint64(m.CreateRevision)) + } + if m.ModRevision != 0 { + n += 1 + sovKv(uint64(m.ModRevision)) + } + if m.Version != 0 { + n += 1 + sovKv(uint64(m.Version)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovKv(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovKv(uint64(m.Lease)) + } + return n +} + 
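+// Illustrative round trip for the generated code in this file (an editor's
+// sketch, not part of the upstream source): Size computes the encoded
+// length, Marshal produces the wire bytes, and Unmarshal reverses it.
+//
+//	kv := &KeyValue{Key: []byte("foo"), Value: []byte("bar"), ModRevision: 7}
+//	data, err := kv.Marshal() // len(data) == kv.Size() on success
+//	if err != nil {
+//		panic(err)
+//	}
+//	var out KeyValue
+//	if err := out.Unmarshal(data); err != nil {
+//		panic(err)
+//	}
+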
+func (m *Event) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovKv(uint64(m.Type)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovKv(uint64(l)) + } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovKv(uint64(l)) + } + return n +} + +func sovKv(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozKv(x uint64) (n int) { + return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) + } + m.CreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) + } + m.ModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
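+ // (editor's note) append can leave m.Value nil when the encoded value is
+ // empty, so the check below normalizes it to a non-nil empty slice.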
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Event_EventType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &KeyValue{} + } + if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKv(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKv(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthKv + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKv + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipKv(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } + +var fileDescriptorKv = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 
0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto new file mode 100644 index 00000000000..23c911b7da8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package mvccpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +message KeyValue { + // key is the key in bytes. An empty key is not allowed. + bytes key = 1; + // create_revision is the revision of last creation on this key. + int64 create_revision = 2; + // mod_revision is the revision of last modification on this key. + int64 mod_revision = 3; + // version is the version of the key. A deletion resets + // the version to zero and any modification of the key + // increases its version. + int64 version = 4; + // value is the value held by the key, in bytes. + bytes value = 5; + // lease is the ID of the lease that attached to key. + // When the attached lease expires, the key will be deleted. + // If lease is 0, then no lease is attached to the key. + int64 lease = 6; +} + +message Event { + enum EventType { + PUT = 0; + DELETE = 1; + } + // type is the kind of event. If type is a PUT, it indicates + // new data has been stored to the key. If type is a DELETE, + // it indicates the key was deleted. + EventType type = 1; + // kv holds the KeyValue for the event. + // A PUT event contains current kv pair. + // A PUT event with kv.Version=1 indicates the creation of a key. + // A DELETE/EXPIRE event contains the deleted key with + // its modification revision set to the revision of deletion. + KeyValue kv = 2; + + // prev_kv holds the key-value pair before the event happens. + KeyValue prev_kv = 3; +} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go new file mode 100644 index 00000000000..f26254ba933 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pathutil implements utility functions for handling slash-separated +// paths. +package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in server.go. +func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. 
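+ // For example (editor's illustration): "//v2//keys/" canonicalizes to
+ // "/v2/keys/" and "v2/keys" to "/v2/keys".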
+ if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 00000000000..600061ce8ea --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,141 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. +func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } else { + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. 
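+// For example (editor's note, not upstream documentation):
+// GetClient("etcd-client", "example.com") queries the SRV records
+// _etcd-client-ssl._tcp.example.com and _etcd-client._tcp.example.com and
+// returns the discovered endpoints, e.g. "https://host1.example.com:2379".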
+func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go new file mode 100644 index 00000000000..3b6aa670baf --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tlsutil provides utility functions for handling TLS. +package tlsutil diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go new file mode 100644 index 00000000000..79b1f632ed5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go @@ -0,0 +1,72 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tlsutil + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" +) + +// NewCertPool creates x509 certPool with provided CA files. +func NewCertPool(CAFiles []string) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, CAFile := range CAFiles { + pemByte, err := ioutil.ReadFile(CAFile) + if err != nil { + return nil, err + } + + for { + var block *pem.Block + block, pemByte = pem.Decode(pemByte) + if block == nil { + break + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certPool.AddCert(cert) + } + } + + return certPool, nil +} + +// NewCert generates TLS cert by using the given cert,key and parse function. 
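+// A typical call (editor's sketch, with illustrative file names):
+// NewCert("server.crt", "server.key", nil) reads both files and, since
+// parseFunc is nil, parses them with tls.X509KeyPair.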
+func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { + cert, err := ioutil.ReadFile(certfile) + if err != nil { + return nil, err + } + + key, err := ioutil.ReadFile(keyfile) + if err != nil { + return nil, err + } + + if parseFunc == nil { + parseFunc = tls.X509KeyPair + } + + tlsCert, err := parseFunc(cert, key) + if err != nil { + return nil, err + } + return &tlsCert, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go new file mode 100644 index 00000000000..37658ce591a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport implements various HTTP transport utilities based on Go +// net package. +package transport diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go new file mode 100644 index 00000000000..6ccae4ee4a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go @@ -0,0 +1,94 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/tls" + "fmt" + "net" + "time" +) + +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + +// NewKeepAliveListener returns a listener that listens on the given address. +// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. +// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. 
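+// An illustrative use (editor's sketch, assuming a plain TCP listener):
+//
+//	ln, _ := net.Listen("tcp", "127.0.0.1:0")
+//	kln, err := NewKeepAliveListener(ln, "http", nil)
+//	// connections from kln.Accept() have TCP keepalive enabled (30s period)
+//
+// For background on TCP keepalive parameters, see: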
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html +func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { + if scheme == "https" { + if tlscfg == nil { + return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") + } + return newTLSKeepaliveListener(l, tlscfg), nil + } + + return &keepaliveListener{ + Listener: l, + }, nil +} + +type keepaliveListener struct{ net.Listener } + +func (kln *keepaliveListener) Accept() (net.Conn, error) { + c, err := kln.Listener.Accept() + if err != nil { + return nil, err + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + return c, nil +} + +// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. +type tlsKeepaliveListener struct { + net.Listener + config *tls.Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. +func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + kac := c.(keepAliveConn) + // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl + // default on linux: 30 + 8 * 30 + // default on osx: 30 + 8 * 75 + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(30 * time.Second) + c = tls.Server(c, l.config) + return +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config must be non-nil and must have +// at least one certificate. +func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { + l := &tlsKeepaliveListener{} + l.Listener = inner + l.config = config + return l +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go new file mode 100644 index 00000000000..930c542066f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go @@ -0,0 +1,80 @@ +// Copyright 2013 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides network utility functions, complementing the more +// common ones in the net package. +package transport + +import ( + "errors" + "net" + "sync" + "time" +) + +var ( + ErrNotTCP = errors.New("only tcp connections have keepalive") +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
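+// For example (editor's illustration): LimitListener(l, 1024) caps l at 1024
+// in-flight connections; Accept blocks once the limit is reached, and a slot
+// is released when an accepted connection is closed.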
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} + +func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlive(doKeepAlive) +} + +func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlivePeriod(d) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go new file mode 100644 index 00000000000..555618e6f0b --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -0,0 +1,281 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/coreos/etcd/pkg/tlsutil" +) + +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(addr, scheme, tlsinfo, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + return net.Listen("tcp", addr) +} + +func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + return newTLSListener(l, tlsinfo, checkSAN) +} + +type TLSInfo struct { + CertFile string + KeyFile string + CAFile string // TODO: deprecate this in v4 + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool + + // ServerName ensures the cert matches the given host in case of discovery / virtual hosting + ServerName string + + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + + selfCert bool + + // parseFunc exists to simplify testing. Typically, parseFunc + // should be left nil. In that case, tls.X509KeyPair will be used. 
+ parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN is a CN which must be provided by a client. + AllowedCN string +} + +func (info TLSInfo) String() string { + return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) +} + +func (info TLSInfo) Empty() bool { + return info.CertFile == "" && info.KeyFile == "" +} + +func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { + if err = os.MkdirAll(dirpath, 0700); err != nil { + return + } + + certPath := filepath.Join(dirpath, "cert.pem") + keyPath := filepath.Join(dirpath, "key.pem") + _, errcert := os.Stat(certPath) + _, errkey := os.Stat(keyPath) + if errcert == nil && errkey == nil { + info.CertFile = certPath + info.KeyFile = keyPath + info.selfCert = true + return + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return + } + + tmpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"etcd"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * (24 * time.Hour)), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + for _, host := range hosts { + h, _, _ := net.SplitHostPort(host) + if ip := net.ParseIP(h); ip != nil { + tmpl.IPAddresses = append(tmpl.IPAddresses, ip) + } else { + tmpl.DNSNames = append(tmpl.DNSNames, h) + } + } + + priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + return + } + + certOut, err := os.Create(certPath) + if err != nil { + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + + b, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return + } + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return + } + pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + keyOut.Close() + + return SelfCert(dirpath, hosts) +} + +func (info TLSInfo) baseConfig() (*tls.Config, error) { + if info.KeyFile == "" || info.CertFile == "" { + return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) + } + + tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if err != nil { + return nil, err + } + + cfg := &tls.Config{ + Certificates: []tls.Certificate{*tlsCert}, + MinVersion: tls.VersionTLS12, + ServerName: info.ServerName, + } + + if info.AllowedCN != "" { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, chains := range verifiedChains { + if len(chains) != 0 { + if info.AllowedCN == chains[0].Subject.CommonName { + return nil + } + } + } + return errors.New("CommonName authentication failed") + } + } + + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + return cfg, nil +} + +// cafiles returns a list of CA file paths. +func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.CAFile != "" { + cs = append(cs, info.CAFile) + } + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP server. +func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + cfg.ClientAuth = tls.NoClientCert + if info.CAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + CAFiles := info.cafiles() + if len(CAFiles) > 0 { + cp, err := tlsutil.NewCertPool(CAFiles) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + + return cfg, nil +} + +// ClientConfig generates a tls.Config object for use by an HTTP client. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + cfg.InsecureSkipVerify = info.InsecureSkipVerify + + CAFiles := info.cafiles() + if len(CAFiles) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + return cfg, nil +} + +// IsClosedConnError returns true if the error is from closing listener, cmux. +// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go new file mode 100644 index 00000000000..6f1600945cc --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,272 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials or CRL revoked +// certificates. 
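+// Handshakes are driven by acceptLoop below; a connection that fails the
+// handshake or the configured check func is closed and reported through
+// handshakeFailure (editor's note).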
+type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc +} + +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. +func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + check: check, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
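+// Connections still handshaking are tracked in a pending set so that closing
+// the listener also closes them promptly (editor's note).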
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return + } + + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + h, _, herr := net.SplitHostPort(remoteAddr) + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' 
{ + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go new file mode 100644 index 00000000000..7e8c02030fe --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go @@ -0,0 +1,44 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +type timeoutConn struct { + net.Conn + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (c timeoutConn) Write(b []byte) (n int, err error) { + if c.wtimeoutd > 0 { + if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Write(b) +} + +func (c timeoutConn) Read(b []byte) (n int, err error) { + if c.rdtimeoutd > 0 { + if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Read(b) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go new file mode 100644 index 00000000000..6ae39ecfc9b --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go @@ -0,0 +1,36 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "time" +) + +type rwTimeoutDialer struct { + wtimeoutd time.Duration + rdtimeoutd time.Duration + net.Dialer +} + +func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { + conn, err := d.Dialer.Dial(network, address) + tconn := &timeoutConn{ + rdtimeoutd: d.rdtimeoutd, + wtimeoutd: d.wtimeoutd, + Conn: conn, + } + return tconn, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go new file mode 100644 index 00000000000..b35e04955bb --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -0,0 +1,57 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +// NewTimeoutListener returns a listener that listens on the given address. +// If read/write on the accepted connection blocks longer than its time limit, +// it will return timeout error. +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { + ln, err := newListener(addr, scheme) + if err != nil { + return nil, err + } + ln = &rwTimeoutListener{ + Listener: ln, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + } + if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { + return nil, err + } + return ln, nil +} + +type rwTimeoutListener struct { + net.Listener + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { + c, err := rwln.Listener.Accept() + if err != nil { + return nil, err + } + return timeoutConn{ + Conn: c, + wtimeoutd: rwln.wtimeoutd, + rdtimeoutd: rwln.rdtimeoutd, + }, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go new file mode 100644 index 00000000000..ea16b4c0f86 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go @@ -0,0 +1,51 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "net/http" + "time" +) + +// NewTimeoutTransport returns a transport created using the given TLS info. +// If read/write on the created connection blocks longer than its time limit, +// it will return timeout error. +// If read/write timeout is set, transport will not be able to reuse connection. 
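+// For example (editor's sketch): NewTimeoutTransport(TLSInfo{}, time.Second,
+// 5*time.Second, 5*time.Second) dials with a 1s timeout and fails any read
+// or write that blocks for longer than 5s.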
+func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { + tr, err := NewTransport(info, dialtimeoutd) + if err != nil { + return nil, err + } + + if rdtimeoutd != 0 || wtimeoutd != 0 { + // the timed out connection will timeout soon after it is idle. + // it should not be put back to http transport as an idle connection for future usage. + tr.MaxIdleConnsPerHost = -1 + } else { + // allow more idle connections between peers to avoid unnecessary port allocation. + tr.MaxIdleConnsPerHost = 1024 + } + + tr.Dial = (&rwTimeoutDialer{ + Dialer: net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + }).Dial + return tr, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go new file mode 100644 index 00000000000..62fe0d38519 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/tls.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "strings" + "time" +) + +// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those +// endpoints that could be validated as secure. +func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { + t, err := NewTransport(tlsInfo, 5*time.Second) + if err != nil { + return nil, err + } + var errs []string + var endpoints []string + for _, ep := range eps { + if !strings.HasPrefix(ep, "https://") { + errs = append(errs, fmt.Sprintf("%q is insecure", ep)) + continue + } + conn, cerr := t.Dial("tcp", ep[len("https://"):]) + if cerr != nil { + errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) + continue + } + conn.Close() + endpoints = append(endpoints, ep) + } + if len(errs) != 0 { + err = fmt.Errorf("%s", strings.Join(errs, ",")) + } + return endpoints, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go new file mode 100644 index 00000000000..4a7fe69d2e1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/transport.go @@ -0,0 +1,71 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "net/http" + "strings" + "time" +) + +type unixTransport struct{ *http.Transport } + +func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { + cfg, err := info.ClientConfig() + if err != nil { + return nil, err + } + + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: dialtimeoutd, + // value taken from http.DefaultTransport + KeepAlive: 30 * time.Second, + }).Dial, + // value taken from http.DefaultTransport + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + + dialer := (&net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }) + dial := func(net, addr string) (net.Conn, error) { + return dialer.Dial("unix", addr) + } + + tu := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + ut := &unixTransport{tu} + + t.RegisterProtocol("unix", ut) + t.RegisterProtocol("unixs", ut) + + return t, nil +} + +func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + return urt.Transport.RoundTrip(req) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go new file mode 100644 index 00000000000..123e2036f0f --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "os" +) + +type unixListener struct{ net.Listener } + +func NewUnixListener(addr string) (net.Listener, error) { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return nil, err + } + l, err := net.Listen("unix", addr) + if err != nil { + return nil, err + } + return &unixListener{l}, nil +} + +func (ul *unixListener) Close() error { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { + return err + } + return ul.Listener.Close() +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go new file mode 100644 index 00000000000..de8ef0bd712 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go new file mode 100644 index 00000000000..1b042d9ce65 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" +) + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go new file mode 100644 index 00000000000..73ef431bef1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -0,0 +1,178 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
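+	// wrap the plain set together with a zero-value RWMutex; every tsafeSet
+	// method locks that mutex before delegating to the wrapped unsafeSet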
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go new file mode 100644 index 00000000000..0dd9ca798ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go new file mode 100644 index 00000000000..9e5d03ff645 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go new file mode 100644 index 00000000000..47690cc381a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go new file mode 100644 index 00000000000..9134cebd35b --- /dev/null +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package version implements etcd version parsing and contains latest version +// information. +package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.2.0+git" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 00000000000..76cf4852c76 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. 
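+// For example, a Version parsed from "1.2.3-alpha" yields []int64{1, 2, 3};
+// the pre-release and metadata parts are intentionally excluded, as they are
+// compared separately (see preReleaseCompare).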
+func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. + if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 00000000000..e256b41a5dd --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+	"sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+	return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+	return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of Version
+func Sort(versions []*Version) {
+	sort.Sort(Versions(versions))
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 00000000000..df83a9c2f01
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 00000000000..7fc1f793cbc
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except that you must type-assert the claims property.
+
+The old example for parsing a token looked like this...
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is now directly mapped to...
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+	type MyCustomClaims struct {
+		User string
+		*StandardClaims
+	}
+
+	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+		claims := token.Claims.(*MyCustomClaims)
+		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+	}
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+	if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is directly mapped to:
+
+```go
+	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM-encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
+		}
+
+		// Look up key
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 00000000000..25aec486c63
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,85 @@
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+
+**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+**NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types to match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing algorithm and which key were used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods.
Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing with respect to semantic versioning.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification (a short sketch appears below).
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
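+
+Before moving on, here is a minimal sketch of the asymmetric flow described under "Choosing a Signing Method" above. It is illustrative only: the claims are placeholders and the key pair is generated on the fly rather than loaded from PEM files.
+
+```go
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"fmt"
+
+	jwt "github.com/dgrijalva/jwt-go"
+)
+
+func roundTrip() error {
+	// The producer holds the private key...
+	key, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return err
+	}
+	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "1234567890"})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		return err
+	}
+
+	// ...while consumers only ever need the public key.
+	_, err = jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		// Always check the alg before trusting the token (see NOTICE above).
+		if _, ok := t.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return &key.PublicKey, nil
+	})
+	return err
+}
+```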
+ +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 00000000000..c21551f6bbd --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,111 @@ +## `jwt-go` Version History + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. 
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added a more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 00000000000..f0228f02e03 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 00000000000..a86dc1a3b34 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
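+//
+// A minimal usage sketch (an editor's example; tokenString and the secret
+// are placeholders, not part of the upstream documentation):
+//
+//	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+//		// Check the signing method before trusting the key material.
+//		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+//			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+//		}
+//		return []byte("secret"), nil
+//	})
+//	if err == nil && token.Valid {
+//		claims := token.Claims.(jwt.MapClaims) // MapClaims is the default claims type
+//		_ = claims["sub"]
+//	}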
+package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 00000000000..2f59a222363 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,147 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out.
Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000000..d19624b7264 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000000..1c93024aad2 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) 
*ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 00000000000..c2299192545 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method.
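+// A brief signing sketch (an editor's example; the secret is a placeholder):
+//
+//	t := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234"})
+//	signed, err := t.SignedString([]byte("secret"))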
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 00000000000..291213c460d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
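+//
+// For example (an editor's sketch; the timestamp is illustrative):
+//
+//	claims := jwt.MapClaims{"exp": float64(time.Now().Add(-time.Hour).Unix())}
+//	err := claims.Valid() // reports ValidationErrorExpired for this hour-old exp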
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 00000000000..f04d189d067 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000000..7bf1c4ea084 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,131 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + var err error + token := &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 00000000000..0ae0b1984e5 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,100 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, the key must be an rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, the key must be an rsa.PrivateKey structure.
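+//
+// A usage sketch (an editor's example; pemBytes and claims are placeholders):
+//
+//	key, _ := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
+//	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(key)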
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 00000000000..10ee9db8a4e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err :=
rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 00000000000..213a90dbbf8 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000000..ed1f212b21e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
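+// For example (methodXYZ is a hypothetical SigningMethod, for illustration only):
+//
+//	RegisterSigningMethod("XYZ", func() SigningMethod { return methodXYZ })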
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000000..d637e0867c6 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
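+// For example, selecting a key by the token's `kid` header (keysByID is a
+// hypothetical lookup map, not part of this package):
+//
+//	token, err := jwt.Parse(s, func(t *jwt.Token) (interface{}, error) {
+//		kid, _ := t.Header["kid"].(string)
+//		return keysByID[kid], nil
+//	})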
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000000..1b1b1921efa --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 00000000000..e2e0651a934 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto + make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 00000000000..e392575b353 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. 
+ mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. 
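+			// (Editor's note: a repeated bytes field arrives here as [][]byte, whose
+			// element kind is Slice rather than Uint8, so it takes the generic
+			// per-element copy further below instead of this scalar branch.)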
+ + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 00000000000..aa207298f99 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,970 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. 
+ */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. 
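+//
+// An illustrative sketch (an editor's example): eight little-endian bytes
+// decode to the corresponding uint64.
+//
+//	x, err := proto.NewBuffer([]byte{1, 0, 0, 0, 0, 0, 0, 0}).DecodeFixed64()
+//	// x == 1, err == nil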
+func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
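+// Varint, fixed32, fixed64, and length-delimited items are consumed directly;
+// a start-group item is skipped recursively until its matching end-group tag.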
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
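+// It consumes the buffer tag by tag: each varint key is split into a field
+// number and wire type, known fields are dispatched to their per-field
+// decoders, and unknown tags are routed to extensions, oneof unmarshalers,
+// or the XXX_unrecognized bytes.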
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. 
+const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. 
+ tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 00000000000..8b84d1b22d4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. 
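+// Each output byte carries seven payload bits, least-significant group
+// first, with the high bit set on every byte except the last; 300, for
+// example, encodes as the two bytes 0xac 0x02.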
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. 
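+// Generated message types that implement it are encoded through their
+// own Marshal method, bypassing the reflection-based encoder below.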
+type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) 
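+	// tagcode is the pre-encoded field key; valEnc appends the varint
+	// value, so field 1 set to 150 emits the bytes 08 96 01.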
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
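+	// Strings are length-delimited: key, varint byte length, then the raw
+	// bytes; field 2 set to "testing" emits 12 07 74 65 73 74 69 6e 67.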
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
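+	// A packed bool field is one key, a varint byte count, then a single
+	// byte (0x00 or 0x01) per element.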
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
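+		// The non-packed form repeats the field key before every element,
+		// which is why the packed variants below are more compact for
+		// large numeric slices.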
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
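+		// Repeated strings cannot use packed encoding; each element
+		// carries its own key and length prefix.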
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
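+// Extensions are held pre-encoded in Extension.enc, so emitting them is
+// mostly concatenation; when more than one is present, enc_map_body sorts
+// the field numbers first so the output is deterministic.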
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
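+	// For a hypothetical map[int32]string field, keybase below ends up as
+	// a **int32 and valbase as a **string, matching the double indirection
+	// the scalar encoders dereference.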
+ + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). 
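+// enc_len_thing reserves four length bytes up front, encodes the body,
+// then shifts the body if the real varint length differs: a 200-byte
+// message needs only the two length bytes c8 01, so it moves left by two.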
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 00000000000..2ed1cf59666 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
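+// Fields whose names begin with "XXX_" (unknown bytes, extensions) are
+// skipped here and compared through the dedicated paths below.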
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
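+			// (A nil []byte and an empty non-nil one compare equal here,
+			// since proto3 does not track presence for scalar fields.)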
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 00000000000..eaad2183126 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,587 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. 
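+ // A sketch of the hazard the indirection avoids (pb.M is a
+ // hypothetical generated message embedding XXX_InternalExtensions):
+ //
+ //	m2 := *m1 // copies only the pointer p, so no sync.Mutex value
+ //	          // is duplicated in a possibly-locked state
+ //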
+ p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. 
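+//
+// A typical lookup (pb.E_Foo is a hypothetical generated descriptor
+// whose ExtensionType is *pb.Foo):
+//
+//	v, err := proto.GetExtension(msg, pb.E_Foo)
+//	if err == nil {
+//		foo := v.(*pb.Foo) // assert to the extension's Go type
+//		_ = foo
+//	}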
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. 
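+// For example (pb.E_Foo and pb.E_Bar are hypothetical descriptors):
+//
+//	vals, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{pb.E_Foo, pb.E_Bar})
+//	// on success, vals[i] is nil wherever the extension is absent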
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
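+// For example, for a hypothetical generated type pb.MyMessage:
+//
+//	for num, desc := range proto.RegisteredExtensions((*pb.MyMessage)(nil)) {
+//		fmt.Println(num, desc.Name)
+//	}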
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 00000000000..1c225504a01 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,897 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. 
Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m 
*Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
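+// A sketch of the reuse pattern this enables (Buffer.Marshal is
+// defined in encode.go; msgs and send are placeholders):
+//
+//	var buf proto.Buffer
+//	for _, m := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(m); err != nil {
+//			return err
+//		}
+//		send(buf.Bytes())
+//	}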
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
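+// For example:
+//
+//	data, _ := proto.Marshal(msg)
+//	new(proto.Buffer).DebugPrint("msg wire format", data)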
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
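+ // Each case below allocates a fresh value of the field's element
+ // type, stores the declared default (or the zero value when dv is
+ // nil and zeros is set), and writes the new pointer through fptr.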
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
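+//
+// For instance, given the proto2 field `optional int32 type = 2 [default=77]`
+// (Go type *int32), the returned scalarField has kind reflect.Int32
+// and value int32(77).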
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
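+ // (Textual comparison would order "10" before "9"; the numeric
+ // specializations below keep integer keys in numeric order.)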
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 00000000000..fd982decd66 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. 
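+ *
+ * A message set is a pre-extensions container format: each item
+ * carries a type_id and the encoded bytes of one message, as
+ * modeled by _MessageSet_Item below.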
+ */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. 
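+ // (Determinism means the same extension map always marshals to
+ // the same bytes, keeping golden files and byte-level comparisons
+ // stable.)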
+ ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. 
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000000..fb512e2e16d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. 
+// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
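word32_Set amortizes allocations by carving pointers out of slabs owned by the Buffer (o.int32s, o.uint32s, o.float32s): one make call serves many optional fields. A stripped-down sketch of that slab trick, with a hypothetical pool type and poolSize standing in for the Buffer and the library's uint32PoolSize:

package main

import "fmt"

const poolSize = 4 // stand-in for uint32PoolSize; the real constant lives elsewhere in the package

type pool struct{ uint32s []uint32 }

func (p *pool) newUint32(x uint32) *uint32 {
	if len(p.uint32s) == 0 {
		p.uint32s = make([]uint32, poolSize) // one allocation serves poolSize fields
	}
	p.uint32s[0] = x
	q := &p.uint32s[0]
	p.uint32s = p.uint32s[1:] // the slab shrinks; handed-out pointers stay valid
	return q
}

func main() {
	var p pool
	a, b := p.newUint32(7), p.newUint32(9)
	fmt.Println(*a, *b) // 7 9
}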
+func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
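Note how Append above only falls back to reflect.Append when the slice is at capacity; otherwise it extends Len in place and writes the element directly, avoiding an allocation and a copy. The same trick in isolation (a plain []int32 is assumed for the demo):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := make([]int32, 0, 4)
	v := reflect.ValueOf(&s).Elem()

	n := v.Len()
	if n < v.Cap() {
		v.SetLen(n + 1) // spare capacity: grow in place, no allocation
	} else {
		v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
	}
	v.Index(n).SetInt(42)

	fmt.Println(s) // [42]
}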
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000000..6b5567d47cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. 
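In this unsafe variant a field really is just a byte offset, so every accessor compiles down to a single pointer addition. The same arithmetic in a standalone sketch (msg and its fields are hypothetical):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 1, B: "x"}
	sf, _ := reflect.TypeOf(*m).FieldByName("B")

	// toField records sf.Offset; structPointer_StringVal then does exactly this:
	p := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + sf.Offset))
	*p = "patched"
	fmt.Println(m.B) // patched
}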
+func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. 
+type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 00000000000..ec2289c0058 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. 
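tagMap exploits the fact that most messages use small, dense tag numbers: any tag below tagMapFastLimit indexes straight into a slice, and only outliers pay for a map lookup. A self-contained miniature of the same structure (fastLimit is shrunk from the library's 1024 so the demo exercises both paths):

package main

import "fmt"

const fastLimit = 8 // the library uses tagMapFastLimit = 1024

type tagMap struct {
	fast []int
	slow map[int]int
}

func (p *tagMap) put(t, fi int) {
	if t > 0 && t < fastLimit {
		for len(p.fast) < t+1 {
			p.fast = append(p.fast, -1) // -1 marks "no field"
		}
		p.fast[t] = fi
		return
	}
	if p.slow == nil {
		p.slow = make(map[int]int)
	}
	p.slow[t] = fi
}

func (p *tagMap) get(t int) (int, bool) {
	if t > 0 && t < fastLimit {
		if t >= len(p.fast) {
			return 0, false
		}
		fi := p.fast[t]
		return fi, fi >= 0
	}
	fi, ok := p.slow[t]
	return fi, ok
}

func main() {
	var m tagMap
	m.put(1, 0)    // small tag: slice fast path
	m.put(5000, 1) // large tag: map fallback
	fmt.Println(m.get(1))    // 0 true
	fmt.Println(m.get(5000)) // 1 true
}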
+type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
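Parse consumes the protobuf struct tags that protoc-gen-go emits. A sketch of the tag layout it expects; the example type is hypothetical, and the trailing comment spells out what Parse would extract from it:

package main

import (
	"fmt"
	"reflect"
)

type example struct {
	// Layout: wire type, tag number, then comma-separated options.
	Name *string `protobuf:"bytes,1,opt,name=name,json=myName,def=hello!"`
}

func main() {
	sf, _ := reflect.TypeOf(example{}).FieldByName("Name")
	fmt.Println(sf.Tag.Get("protobuf"))
	// Parse on this string yields: Wire="bytes", Tag=1, Optional=true,
	// OrigName="name", JSONName="myName", HasDefault=true, Default="hello!".
}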
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
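The "precalculate tag code" step above computes, once per field, the varint encoding of the wire key (tag<<3 | wireType) that prefixes every occurrence of that field on the wire. Recomputing it in a standalone sketch:

package main

import "fmt"

// tagcode returns the varint encoding of (tag<<3 | wire), the same bytes
// setEncAndDec stores in p.tagcode.
func tagcode(tag, wire uint32) []byte {
	x := tag<<3 | wire
	var buf []byte
	for x > 127 {
		buf = append(buf, 0x80|uint8(x&0x7F)) // low 7 bits, continuation bit set
		x >>= 7
	}
	return append(buf, uint8(x))
}

func main() {
	fmt.Printf("% x\n", tagcode(1, 0))  // 08    (field 1, varint)
	fmt.Printf("% x\n", tagcode(16, 2)) // 82 01 (field 16, bytes)
}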
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. 
+ p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
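RegisterEnum and EnumValueMap are part of the package's exported surface; generated code invokes the former from init functions. A usage sketch with a made-up enum name:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Generated code normally performs this registration in init().
	proto.RegisterEnum("example.Color",
		map[int32]string{0: "RED", 1: "BLUE"},
		map[string]int32{"RED": 0, "BLUE": 1})

	fmt.Println(proto.EnumValueMap("example.Color")["BLUE"]) // 1
}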
+var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 00000000000..965876bf033 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,854 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
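Before the implementation details, a usage sketch of this file's public entry points. The Greeting type below is hand-written to stand in for protoc-generated code: it satisfies the Message interface (Reset, String, ProtoMessage) and carries the same struct tags the reflection code reads, which should be enough for the text marshaler:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type Greeting struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	g := &Greeting{Name: proto.String("world")}
	fmt.Print(proto.MarshalTextString(g)) // name: "world"
}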
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
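For reference, the map branch above renders each entry as a nested message with key and value fields. A sketch, assuming a hand-written Labels type tagged the way protoc-gen-go tags a map<string, string> field:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type Labels struct {
	Tags map[string]string `protobuf:"bytes,1,rep,name=tags" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}

func (m *Labels) Reset()         { *m = Labels{} }
func (m *Labels) String() string { return proto.CompactTextString(m) }
func (*Labels) ProtoMessage()    {}

func main() {
	l := &Labels{Tags: map[string]string{"env": "prod"}}
	fmt.Print(proto.MarshalTextString(l))
	// tags: <
	//   key: "env"
	//   value: "prod"
	// >
}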
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
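+		// Each element of a repeated extension gets its own line; as an
+		// illustrative sketch (the extension name is hypothetical), two
+		// values render as:
+		//
+		//	[testdata.greeting]: "hello"
+		//	[testdata.greeting]: "world"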
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 00000000000..5e14513f28c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,895 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func 
isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
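+		// ch now holds the expansion of a single escape sequence, e.g.
+		// `\n` yields a newline byte, `\303` the byte 0xC3, and `\u00e9`
+		// the two raw bytes 0x00 0xE9 (see unescape below).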
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
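+// A lookup miss returns (-1, nil, false), in which case readStruct falls
+// back to resolving the name as a oneof field.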
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
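+			// The scan matches the parsed name against each registered
+			// descriptor's Name; e.g. an input of `[testdata.my_ext]: 17`
+			// (hypothetical extension) matches desc.Name "testdata.my_ext".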
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
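+		// e.g. input `id: 1 id: 2` (hypothetical field name) is rejected
+		// for a singular field with "non-repeated field ... was repeated",
+		// while a repeated field simply accumulates both values.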
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
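+		// e.g. "1.5f" is parsed as 1.5 after the suffix is dropped, while
+		// "inf" keeps its final 'f' so strconv.ParseFloat sees "inf" and
+		// yields +Inf.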
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000000..b2af97f4a98 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,139 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. 
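+//
+// For example (an illustrative sketch; pb.Foo stands in for any generated
+// message type):
+//
+//   a, _ := ptypes.MarshalAny(&pb.Foo{})
+//   if ptypes.Is(a, &pb.Foo{}) {
+//     // a wraps a pb.Foo, so UnmarshalAny into a &pb.Foo{} will succeed.
+//   }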
+func Is(any *any.Any, pb proto.Message) bool {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return false
+	}
+
+	return aname == proto.MessageName(pb)
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 00000000000..f34601723de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,178 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+/*
+Package any is a generated protocol buffer package.
+
+It is generated from these files:
+	google/protobuf/any.proto
+
+It has these top-level messages:
+	Any
+*/
+package any
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+//     foo := &pb.Foo{...}
+//     any, err := ptypes.MarshalAny(foo)
+//     ...
+//     foo := &pb.Foo{}
+//     if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//       ...
+//     }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name whose content describes the type of the
+	// serialized protocol buffer message.
+ // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000000..c7486676231 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name whose content describes the type of the
+  // serialized protocol buffer message.
+  //
+  // For URLs which use the scheme `http`, `https`, or no scheme, the
+  // following restrictions and interpretations apply:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * The last segment of the URL's path must represent the fully
+  //   qualified name of the type (as in `path/google.protobuf.Duration`).
+  //   The name should be in a canonical form (e.g., leading "." is
+  //   not accepted).
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 00000000000..c0d595da7ab
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000000..65cb0f8eb5f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
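+// For example, durpb.Duration{Seconds: 1, Nanos: -1} is rejected for mixed
+// signs, and any Seconds value outside ±315,576,000,000 (10,000 years) is
+// out of range.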
+func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000000..b2410a098eb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
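+	// For example, a span of -1.5s is Seconds: -1, Nanos: -500000000.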
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000000..975fce41aae --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field.
For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh new file mode 100755 index 00000000000..b50a9414ac2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -0,0 +1,43 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=(any duration empty struct timestamp wrappers) + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd "$base" + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir + +for file in ${PROTO_FILES[@]}; do + echo 1>&2 "* $file" + protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die + cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file +done + +echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 00000000000..47f10dbc2cc --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,134 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
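+//
+// A minimal round-trip sketch (illustrative only; both helpers are defined in
+// this file):
+//
+//   ts, err := TimestampProto(time.Now()) // time.Time -> proto Timestamp
+//   t, err := Timestamp(ts)               // proto Timestamp -> time.Time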
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000000..e23e4a25daf --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000000..b7cbd17502f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. 
The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/gophercloud/gophercloud/FAQ.md b/vendor/github.com/gophercloud/gophercloud/FAQ.md new file mode 100644 index 00000000000..88a366a288b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/FAQ.md @@ -0,0 +1,148 @@ +# Tips + +## Implementing default logging and re-authentication attempts + +You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client +like the following and setting it as the provider client's HTTP Client (via the +`gophercloud.ProviderClient.HTTPClient` field): + +```go +//... + +// LogRoundTripper satisfies the http.RoundTripper interface and is used to +// customize the default Gophercloud RoundTripper to allow for logging. +type LogRoundTripper struct { + rt http.RoundTripper + numReauthAttempts int +} + +// newHTTPClient return a custom HTTP client that allows for logging relevant +// information before and after the HTTP request. +func newHTTPClient() http.Client { + return http.Client{ + Transport: &LogRoundTripper{ + rt: http.DefaultTransport, + }, + } +} + +// RoundTrip performs a round-trip HTTP request and logs relevant information about it. +func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + glog.Infof("Request URL: %s\n", request.URL) + + response, err := lrt.rt.RoundTrip(request) + if response == nil { + return nil, err + } + + if response.StatusCode == http.StatusUnauthorized { + if lrt.numReauthAttempts == 3 { + return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.") + } + lrt.numReauthAttempts++ + } + + glog.Debugf("Response Status: %s\n", response.Status) + + return response, nil +} + +endpoint := "https://127.0.0.1/auth" +pc := openstack.NewClient(endpoint) +pc.HTTPClient = newHTTPClient() + +//... +``` + + +## Implementing custom objects + +OpenStack request/response objects may differ among variable names or types. 
+
+### Custom request objects
+
+To pass custom options to a request, implement the desired `OptsBuilder` interface. For
+example, to pass in
+
+```go
+type MyCreateServerOpts struct {
+ Name string
+ Size int
+}
+```
+
+to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface:
+
+```go
+func (o MyCreateServerOpts) ToServerCreateMap() (map[string]interface{}, error) {
+ return map[string]interface{}{
+ "name": o.Name,
+ "size": o.Size,
+ }, nil
+}
+```
+
+Then create an instance of your custom options object and pass it to `servers.Create`:
+
+```go
+// ...
+myOpts := MyCreateServerOpts{
+ Name: "s1",
+ Size: 100,
+}
+server, err := servers.Create(computeClient, myOpts).Extract()
+// ...
+```
+
+### Custom response objects
+
+Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be
+combined to create a custom object:
+
+```go
+// ...
+type MyVolume struct {
+ volumes.Volume
+ tenantattr.VolumeExt
+}
+
+var v struct {
+ MyVolume `json:"volume"`
+}
+
+err := volumes.Get(client, volID).ExtractInto(&v)
+// ...
+```
+
+## Overriding default `UnmarshalJSON` method
+
+For some response objects, a field may be a custom type or may be allowed to take on
+different types. In these cases, overriding the default `UnmarshalJSON` method may be
+necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON`
+method on the type:
+
+```go
+// ...
+type MyVolume struct {
+ ID string `json:"id"`
+ TimeCreated time.Time `json:"-"`
+}
+
+func (r *MyVolume) UnmarshalJSON(b []byte) error {
+ type tmp MyVolume
+ var s struct {
+ tmp
+ TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+ }
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+ *r = MyVolume(s.tmp)
+
+ r.TimeCreated = time.Time(s.TimeCreated)
+
+ return err
+}
+// ...
+```
diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE
new file mode 100644
index 00000000000..fbbbc9e4cba
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/LICENSE
@@ -0,0 +1,191 @@
+Copyright 2012-2013 Rackspace, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md new file mode 100644 index 00000000000..aa383c9cc9e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md @@ -0,0 +1,32 @@ +# Compute + +## Floating IPs + +* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` +* `floatingips.Associate` and `floatingips.Disassociate` have been removed. +* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. + +## Security Groups + +* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. +* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. 
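+
+For illustration, a minimal before/after sketch of the rename (assuming the
+renamed functions keep the `(client, serverID, groupName)` parameters; check
+the package godoc for the exact signatures):
+
+```golang
+// Previously:
+// err := secgroups.AddServerToGroup(client, server.ID, "my-group").ExtractErr()
+
+// Now:
+err := secgroups.AddServer(client, server.ID, "my-group").ExtractErr()
+```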
+ +## Servers + +* `servers.Reboot` now requires a `servers.RebootOpts` struct: + + ```golang + rebootOpts := &servers.RebootOpts{ + Type: servers.SoftReboot, + } + res := servers.Reboot(client, server.ID, rebootOpts) + ``` + +# Identity + +## V3 + +### Tokens + +* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of + `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md new file mode 100644 index 00000000000..60ca479de89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -0,0 +1,143 @@ +# Gophercloud: an OpenStack SDK for Go +[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) +[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) + +Gophercloud is an OpenStack Go SDK. + +## Useful links + +* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) +* [Effective Go](https://golang.org/doc/effective_go.html) + +## How to install + +Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) +is pointing to an appropriate directory where you want to install Gophercloud: + +```bash +mkdir $HOME/go +export GOPATH=$HOME/go +``` + +To protect yourself against changes in your dependencies, we highly recommend choosing a +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for +your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install +Gophercloud as a dependency like so: + +```bash +go get github.com/gophercloud/gophercloud + +# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" + +godep save ./... +``` + +This will install all the source files you need into a `Godeps/_workspace` directory, which is +referenceable from your own source files when you use the `godep go` command. + +## Getting started + +### Credentials + +Because you'll be hitting an API, you will need to retrieve your OpenStack +credentials and either store them as environment variables or in your local Go +files. The first method is recommended because it decouples credential +information from source code, allowing you to push the latter to your version +control system without any security risk. + +You will need to retrieve the following: + +* username +* password +* a valid Keystone identity URL + +For users that have the OpenStack dashboard installed, there's a shortcut. If +you visit the `project/access_and_security` path in Horizon and click on the +"Download OpenStack RC File" button at the top right hand corner, you will +download a bash file that exports all of your access details to environment +variables. To execute the file, run `source admin-openrc.sh` and you will be +prompted for your password. + +### Authentication + +Once you have access to your credentials, you can begin plugging them into +Gophercloud. The next step is authentication, and this is handled by a base +"Provider" struct. 
To get one, you can either pass in your credentials
+explicitly, or tell Gophercloud to use environment variables:
+
+```go
+import (
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/openstack"
+ "github.com/gophercloud/gophercloud/openstack/utils"
+)
+
+// Option 1: Pass in the values yourself
+opts := gophercloud.AuthOptions{
+ IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+ Username: "{username}",
+ Password: "{password}",
+}
+
+// Option 2: Use a utility function to retrieve all your environment variables
+opts, err := openstack.AuthOptionsFromEnv()
+```
+
+Once you have the `opts` variable, you can pass it in and get back a
+`ProviderClient` struct:
+
+```go
+provider, err := openstack.AuthenticatedClient(opts)
+```
+
+The `ProviderClient` is the top-level client that all of your OpenStack services
+derive from. The provider contains all of the authentication details that allow
+your Go code to access the API - such as the base URL and token ID.
+
+### Provision a server
+
+Once we have a base Provider, we inject it as a dependency into each OpenStack
+service. In order to work with the Compute API, we need a Compute service
+client, which can be created like so:
+
+```go
+client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+ Region: os.Getenv("OS_REGION_NAME"),
+})
+```
+
+We then use this `client` for any Compute API operation we want. In our case,
+we want to provision a new server - so we invoke the `Create` method and pass
+in the flavor ID (hardware specification) and image ID (operating system) we're
+interested in:
+
+```go
+import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+
+server, err := servers.Create(client, servers.CreateOpts{
+ Name: "My new server!",
+ FlavorRef: "flavor_id",
+ ImageRef: "image_id",
+}).Extract()
+```
+
+The above code sample creates a new server with the given parameters and stores
+the new resource in the `server` variable (a
+[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct).
+
+## Advanced Usage
+
+Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works.
+
+## Backwards-Compatibility Guarantees
+
+None. Vendor it and write tests covering the parts you use.
+
+## Contributing
+
+See the [contributing guide](./.github/CONTRIBUTING.md).
+
+## Help and feedback
+
+If you're struggling with something or have spotted a potential bug, feel free
+to submit an issue to our [bug tracker](/issues).
diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md
new file mode 100644
index 00000000000..e7531a83d9d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md
@@ -0,0 +1,74 @@
+
+## On Pull Requests
+
+- Before you start a PR there needs to be a Github issue and a discussion about it
+ on that issue with a core contributor, even if it's just a 'SGTM'.
+
+- A PR's description must reference the issue it closes with a `For <issue-number>` (e.g. For #293).
+
+- A PR's description must contain link(s) to the line(s) in the OpenStack
+ source code (on Github) that prove(s) the PR code to be valid. Links to documentation
+ are not good enough. The link(s) should be to a non-`master` branch.
For example,
+ a pull request implementing the creation of a Neutron v2 subnet might put the
+ following link in the description:
+
+ https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749
+
+ From that link, a reviewer (or user) can verify the fields in the request/response
+ objects in the PR.
+
+- A PR that is in-progress should have `[wip]` in front of the PR's title. When
+ ready for review, remove the `[wip]` and ping a core contributor with an `@`.
+
+- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with
+ one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM]
+ prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the
+ [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will
+ let reviewers know it is ready to review.
+
+- A PR should be small. Even if you intend on implementing an entire
+ service, a PR should only be one route of that service
+ (e.g. create server or get server, but not both).
+
+- Unless explicitly asked, do not squash commits in the middle of a review; only
+ append. It makes it difficult for the reviewer to see what's changed from one
+ review to the next.
+
+## On Code
+
+- Regarding design: follow the code already in the library as closely as is
+ reasonable. Most operations (e.g. create, delete) admit the same design.
+
+- Unit tests and acceptance (integration) tests must be written to cover each PR.
+ Tests for operations with several options (e.g. list, create) should include all
+ the options in the tests. This will allow users to verify an operation on their
+ own infrastructure and see an example of usage.
+
+- If in doubt, ask in-line on the PR.
+
+### File Structure
+
+- The following should be used in most cases:
+
+ - `requests.go`: contains all the functions that make HTTP requests and the
+ types associated with the HTTP request (parameters for URL, body, etc)
+ - `results.go`: contains all the response objects and their methods
+ - `urls.go`: contains the endpoints to which the requests are made
+
+### Naming
+
+- For methods on a type in `results.go`, the receiver should be named `r` and the
+ variable into which it will be unmarshalled `s`.
+
+- Functions in `requests.go`, with the exception of functions that return a
+ `pagination.Pager`, should use a named return value called `r`.
+
+- Functions in `requests.go` that accept request bodies should accept as their
+ last parameter an `interface` named `<Action>OptsBuilder` (eg `CreateOptsBuilder`).
+ This `interface` should have at least a method named `To<Resource><Action>Map`
+ (eg `ToPortCreateMap`).
+
+- Functions in `requests.go` that accept query strings should accept as their
+ last parameter an `interface` named `<Action>OptsBuilder` (eg `ListOptsBuilder`).
+ This `interface` should have at least a method named `To<Resource><Action>Query`
+ (eg `ToServerListQuery`).
diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go
new file mode 100644
index 00000000000..4211470020a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go
@@ -0,0 +1,354 @@
+package gophercloud
+
+/*
+AuthOptions stores information needed to authenticate to an OpenStack Cloud.
+You can populate one manually, or use a provider's AuthOptionsFromEnv() function
+to read relevant information from the standard environment variables.
Pass one
+to a provider's AuthenticatedClient function to authenticate and obtain a
+ProviderClient representing an active session on that provider.
+
+Its fields are the union of those recognized by each identity implementation and
+provider.
+
+An example of manually providing authentication information:
+
+ opts := gophercloud.AuthOptions{
+ IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+ Username: "{username}",
+ Password: "{password}",
+ TenantID: "{tenant_id}",
+ }
+
+ provider, err := openstack.AuthenticatedClient(opts)
+
+An example of using AuthOptionsFromEnv(), where the environment variables can
+be read from a file, such as a standard openrc file:
+
+ opts, err := openstack.AuthOptionsFromEnv()
+ provider, err := openstack.AuthenticatedClient(opts)
+*/
+type AuthOptions struct {
+ // IdentityEndpoint specifies the HTTP endpoint that is required to work with
+ // the Identity API of the appropriate version. While it's ultimately needed by
+ // all of the identity services, it will often be populated by a provider-level
+ // function.
+ //
+ // The IdentityEndpoint is typically referred to as the "auth_url" or
+ // "OS_AUTH_URL" in the information provided by the cloud operator.
+ IdentityEndpoint string `json:"-"`
+
+ // Username is required if using Identity V2 API. Consult with your provider's
+ // control panel to discover your account's username. In Identity V3, either
+ // UserID or a combination of Username and DomainID or DomainName are needed.
+ Username string `json:"username,omitempty"`
+ UserID string `json:"-"`
+
+ Password string `json:"password,omitempty"`
+
+ // At most one of DomainID and DomainName must be provided if using Username
+ // with Identity V3. Otherwise, both are optional.
+ DomainID string `json:"-"`
+ DomainName string `json:"name,omitempty"`
+
+ // The TenantID and TenantName fields are optional for the Identity V2 API.
+ // The same fields are known as project_id and project_name in the Identity
+ // V3 API, but are collected as TenantID and TenantName here in both cases.
+ // Some providers allow you to specify a TenantName instead of the TenantID.
+ // Some require both. Your provider's authentication policies will determine
+ // how these fields influence authentication.
+ // If DomainID or DomainName are provided, they will also apply to TenantName.
+ // It is not currently possible to authenticate with Username and a Domain
+ // and scope to a Project in a different Domain by using TenantName. To
+ // accomplish that, the ProjectID will need to be provided as the TenantID
+ // option.
+ TenantID string `json:"tenantId,omitempty"`
+ TenantName string `json:"tenantName,omitempty"`
+
+ // AllowReauth should be set to true if you grant permission for Gophercloud to
+ // cache your credentials in memory, and to allow Gophercloud to attempt to
+ // re-authenticate automatically if/when your token expires. If you set it to
+ // false, it will not cache these settings, but re-authentication will not be
+ // possible. This setting defaults to false.
+ //
+ // NOTE: The reauth function will try to re-authenticate endlessly if left
+ // unchecked. The way to limit the number of attempts is to provide a custom
+ // HTTP client to the provider client and provide a transport that implements
+ // the RoundTripper interface and stores the number of failed retries.
For an + // example of this, see here: + // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` +} + +// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder +// interface in the v2 tokens package +func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + // Populate the request map. + authMap := make(map[string]interface{}) + + if opts.Username != "" { + if opts.Password != "" { + authMap["passwordCredentials"] = map[string]interface{}{ + "username": opts.Username, + "password": opts.Password, + } + } else { + return nil, ErrMissingInput{Argument: "Password"} + } + } else if opts.TokenID != "" { + authMap["token"] = map[string]interface{}{ + "id": opts.TokenID, + } + } else { + return nil, ErrMissingInput{Argument: "Username"} + } + + if opts.TenantID != "" { + authMap["tenantId"] = opts.TenantID + } + if opts.TenantName != "" { + authMap["tenantName"] = opts.TenantName + } + + return map[string]interface{}{"auth": authMap}, nil +} + +func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + type domainReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } + + type projectReq struct { + Domain *domainReq `json:"domain,omitempty"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + } + + type userReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Password string `json:"password"` + Domain *domainReq `json:"domain,omitempty"` + } + + type passwordReq struct { + User userReq `json:"user"` + } + + type tokenReq struct { + ID string `json:"id"` + } + + type identityReq struct { + Methods []string `json:"methods"` + Password *passwordReq `json:"password,omitempty"` + Token *tokenReq `json:"token,omitempty"` + } + + type authReq struct { + Identity identityReq `json:"identity"` + } + + type request struct { + Auth authReq `json:"auth"` + } + + // Populate the request structure based on the provided arguments. Create and return an error + // if insufficient or incompatible information is present. + var req request + + if opts.Password == "" { + if opts.TokenID != "" { + // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication + // parameters. + if opts.Username != "" { + return nil, ErrUsernameWithToken{} + } + if opts.UserID != "" { + return nil, ErrUserIDWithToken{} + } + if opts.DomainID != "" { + return nil, ErrDomainIDWithToken{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithToken{} + } + + // Configure the request for Token authentication. + req.Auth.Identity.Methods = []string{"token"} + req.Auth.Identity.Token = &tokenReq{ + ID: opts.TokenID, + } + } else { + // If no password or token ID are available, authentication can't continue. + return nil, ErrMissingPassword{} + } + } else { + // Password authentication. + req.Auth.Identity.Methods = []string{"password"} + + // At least one of Username and UserID must be specified. + if opts.Username == "" && opts.UserID == "" { + return nil, ErrUsernameOrUserID{} + } + + if opts.Username != "" { + // If Username is provided, UserID may not be provided. + if opts.UserID != "" { + return nil, ErrUsernameOrUserID{} + } + + // Either DomainID or DomainName must also be specified. 
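+ // In other words (summarizing the checks below): a valid password-based
+ // request provides either Username + Password + exactly one of DomainID or
+ // DomainName, or UserID + Password with no domain fields at all.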
+ if opts.DomainID == "" && opts.DomainName == "" { + return nil, ErrDomainIDOrDomainName{} + } + + if opts.DomainID != "" { + if opts.DomainName != "" { + return nil, ErrDomainIDOrDomainName{} + } + + // Configure the request for Username and Password authentication with a DomainID. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{ID: &opts.DomainID}, + }, + } + } + + if opts.DomainName != "" { + // Configure the request for Username and Password authentication with a DomainName. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{Name: &opts.DomainName}, + }, + } + } + } + + if opts.UserID != "" { + // If UserID is specified, neither DomainID nor DomainName may be. + if opts.DomainID != "" { + return nil, ErrDomainIDWithUserID{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithUserID{} + } + + // Configure the request for UserID and Password authentication. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ID: &opts.UserID, Password: opts.Password}, + } + } + } + + b, err := BuildRequestBody(req, "") + if err != nil { + return nil, err + } + + if len(scope) != 0 { + b["auth"].(map[string]interface{})["scope"] = scope + } + + return b, nil +} + +func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + + var scope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string + } + + if opts.TenantID != "" { + scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + scope.ProjectName = opts.TenantName + scope.DomainID = opts.DomainID + scope.DomainName = opts.DomainName + } + } + + if scope.ProjectName != "" { + // ProjectName provided: either DomainID or DomainName must also be supplied. + // ProjectID may not be supplied. + if scope.DomainID == "" && scope.DomainName == "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + if scope.ProjectID != "" { + return nil, ErrScopeProjectIDOrProjectName{} + } + + if scope.DomainID != "" { + // ProjectName + DomainID + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &scope.ProjectName, + "domain": map[string]interface{}{"id": &scope.DomainID}, + }, + }, nil + } + + if scope.DomainName != "" { + // ProjectName + DomainName + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &scope.ProjectName, + "domain": map[string]interface{}{"name": &scope.DomainName}, + }, + }, nil + } + } else if scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if scope.DomainID != "" { + return nil, ErrScopeProjectIDAlone{} + } + if scope.DomainName != "" { + return nil, ErrScopeProjectIDAlone{} + } + + // ProjectID + return map[string]interface{}{ + "project": map[string]interface{}{ + "id": &scope.ProjectID, + }, + }, nil + } else if scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
+		if scope.DomainName != "" {
+			return nil, ErrScopeDomainIDOrDomainName{}
+		}
+
+		// DomainID
+		return map[string]interface{}{
+			"domain": map[string]interface{}{
+				"id": &scope.DomainID,
+			},
+		}, nil
+	} else if scope.DomainName != "" {
+		// DomainName
+		return map[string]interface{}{
+			"domain": map[string]interface{}{
+				"name": &scope.DomainName,
+			},
+		}, nil
+	}
+
+	return nil, nil
+}
+
+func (opts AuthOptions) CanReauth() bool {
+	return opts.AllowReauth
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go
new file mode 100644
index 00000000000..30067aa3527
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/doc.go
@@ -0,0 +1,93 @@
+/*
+Package gophercloud provides a multi-vendor interface to OpenStack-compatible
+clouds. The library has a three-level hierarchy: providers, services, and
+resources.
+
+Authenticating with Providers
+
+Provider structs represent the cloud providers that offer and manage a
+collection of services. You will generally want to create one Provider
+client per OpenStack cloud.
+
+Use your OpenStack credentials to create a Provider client. The
+IdentityEndpoint is typically referred to as "auth_url" or "OS_AUTH_URL" in
+information provided by the cloud operator. Additionally, the cloud may refer to
+TenantID or TenantName as project_id and project_name. Credentials are
+specified like so:
+
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		Username: "{username}",
+		Password: "{password}",
+		TenantID: "{tenant_id}",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)
+
+You may also use the openstack.AuthOptionsFromEnv() helper function. This
+function reads in standard environment variables frequently found in an
+OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant"
+instead of "project".
+
+	opts, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(opts)
+
+Service Clients
+
+Service structs are specific to a provider and handle all of the logic and
+operations for a particular OpenStack service. Examples of services include:
+Compute, Object Storage, Block Storage. In order to define one, you need to
+pass in the parent provider, like so:
+
+	opts := gophercloud.EndpointOpts{Region: "RegionOne"}
+
+	client := openstack.NewComputeV2(provider, opts)
+
+Resources
+
+Resource structs are the domain models that services make use of in order
+to work with and represent the state of API resources:
+
+	server, err := servers.Get(client, "{serverId}").Extract()
+
+Intermediate Result structs are returned for API operations, which allow
+generic access to the HTTP headers, response body, and any errors associated
+with the network transaction. To turn a result into a usable resource struct,
+you must call the Extract method which is chained to the response, or an
+Extract function from an applicable extension:
+
+	result := servers.Get(client, "{serverId}")
+
+	// Attempt to extract the disk configuration from the OS-DCF disk config
+	// extension:
+	config, err := diskconfig.ExtractGet(result)
+
+All requests that enumerate a collection return a Pager struct that is used to
+iterate through the results one page at a time.
Use the EachPage method on that +Pager to handle each successive Page in a closure, then use the appropriate +extraction method from that request's package to interpret that Page as a slice +of results: + + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + // Handle the []servers.Server slice. + + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) + +If you want to obtain the entire collection of pages without doing any +intermediary processing on each page, you can use the AllPages method: + + allPages, err := servers.List(client, nil).AllPages() + allServers, err := servers.ExtractServers(allPages) + +This top-level package contains utility functions and data types that are used +throughout the provider and service packages. Of particular note for end users +are the AuthOptions and EndpointOpts structs. +*/ +package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go new file mode 100644 index 00000000000..2fbc3c97f14 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go @@ -0,0 +1,76 @@ +package gophercloud + +// Availability indicates to whom a specific service endpoint is accessible: +// the internet at large, internal networks only, or only to administrators. +// Different identity services use different terminology for these. Identity v2 +// lists them as different kinds of URLs within the service catalog ("adminURL", +// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an +// endpoint's response. +type Availability string + +const ( + // AvailabilityAdmin indicates that an endpoint is only available to + // administrators. + AvailabilityAdmin Availability = "admin" + + // AvailabilityPublic indicates that an endpoint is available to everyone on + // the internet. + AvailabilityPublic Availability = "public" + + // AvailabilityInternal indicates that an endpoint is only available within + // the cluster's internal network. + AvailabilityInternal Availability = "internal" +) + +// EndpointOpts specifies search criteria used by queries against an +// OpenStack service catalog. The options must contain enough information to +// unambiguously identify one, and only one, endpoint within the catalog. +// +// Usually, these are passed to service client factory functions in a provider +// package, like "openstack.NewComputeV2()". +type EndpointOpts struct { + // Type [required] is the service type for the client (e.g., "compute", + // "object-store"). Generally, this will be supplied by the service client + // function, but a user-given value will be honored if provided. + Type string + + // Name [optional] is the service name for the client (e.g., "nova") as it + // appears in the service catalog. Services can have the same Type but a + // different Name, which is why both Type and Name are sometimes needed. + Name string + + // Region [required] is the geographic region in which the endpoint resides, + // generally specifying which datacenter should house your resources. + // Required only for services that span multiple regions. + Region string + + // Availability [optional] is the visibility of the endpoint to be returned. + // Valid types include the constants AvailabilityPublic, AvailabilityInternal, + // or AvailabilityAdmin from this package. 
+ // + // Availability is not required, and defaults to AvailabilityPublic. Not all + // providers or services offer all Availability options. + Availability Availability +} + +/* +EndpointLocator is an internal function to be used by provider implementations. + +It provides an implementation that locates a single endpoint from a service +catalog for a specific ProviderClient based on user-provided EndpointOpts. The +provider then uses it to discover related ServiceClients. +*/ +type EndpointLocator func(EndpointOpts) (string, error) + +// ApplyDefaults is an internal method to be used by provider implementations. +// +// It sets EndpointOpts fields if not already set, including a default type. +// Currently, EndpointOpts.Availability defaults to the public endpoint. +func (eo *EndpointOpts) ApplyDefaults(t string) { + if eo.Type == "" { + eo.Type = t + } + if eo.Availability == "" { + eo.Availability = AvailabilityPublic + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go new file mode 100644 index 00000000000..88fd2ac676b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -0,0 +1,401 @@ +package gophercloud + +import "fmt" + +// BaseError is an error type that all other error types embed. +type BaseError struct { + DefaultErrString string + Info string +} + +func (e BaseError) Error() string { + e.DefaultErrString = "An error occurred while executing a Gophercloud request." + return e.choseErrString() +} + +func (e BaseError) choseErrString() string { + if e.Info != "" { + return e.Info + } + return e.DefaultErrString +} + +// ErrMissingInput is the error when input is required in a particular +// situation but not provided by the user +type ErrMissingInput struct { + BaseError + Argument string +} + +func (e ErrMissingInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) + return e.choseErrString() +} + +// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. +type ErrInvalidInput struct { + ErrMissingInput + Value interface{} +} + +func (e ErrInvalidInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) + return e.choseErrString() +} + +// ErrUnexpectedResponseCode is returned by the Request method when a response code other than +// those listed in OkCodes is encountered. +type ErrUnexpectedResponseCode struct { + BaseError + URL string + Method string + Expected []int + Actual int + Body []byte +} + +func (e ErrUnexpectedResponseCode) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", + e.Expected, e.Method, e.URL, e.Actual, e.Body, + ) + return e.choseErrString() +} + +// ErrDefault400 is the default error type returned on a 400 HTTP response code. +type ErrDefault400 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault401 is the default error type returned on a 401 HTTP response code. +type ErrDefault401 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault404 is the default error type returned on a 404 HTTP response code. +type ErrDefault404 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault405 is the default error type returned on a 405 HTTP response code. +type ErrDefault405 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault408 is the default error type returned on a 408 HTTP response code. 
+type ErrDefault408 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault429 is the default error type returned on a 429 HTTP response code.
+type ErrDefault429 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault500 is the default error type returned on a 500 HTTP response code.
+type ErrDefault500 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault503 is the default error type returned on a 503 HTTP response code.
+type ErrDefault503 struct {
+	ErrUnexpectedResponseCode
+}
+
+func (e ErrDefault400) Error() string {
+	return "Invalid request due to incorrect syntax or missing required parameters."
+}
+func (e ErrDefault401) Error() string {
+	return "Authentication failed"
+}
+func (e ErrDefault404) Error() string {
+	return "Resource not found"
+}
+func (e ErrDefault405) Error() string {
+	return "Method not allowed"
+}
+func (e ErrDefault408) Error() string {
+	return "The server timed out waiting for the request"
+}
+func (e ErrDefault429) Error() string {
+	return "Too many requests have been sent in a given amount of time. Pause" +
+		" requests, wait up to one minute, and try again."
+}
+func (e ErrDefault500) Error() string {
+	return "Internal Server Error"
+}
+func (e ErrDefault503) Error() string {
+	return "The service is currently unable to handle the request due to a temporary" +
+		" overloading or maintenance. This is a temporary condition. Try again later."
+}
+
+// Err400er is the interface resource error types implement to override the error message
+// from a 400 error.
+type Err400er interface {
+	Error400(ErrUnexpectedResponseCode) error
+}
+
+// Err401er is the interface resource error types implement to override the error message
+// from a 401 error.
+type Err401er interface {
+	Error401(ErrUnexpectedResponseCode) error
+}
+
+// Err404er is the interface resource error types implement to override the error message
+// from a 404 error.
+type Err404er interface {
+	Error404(ErrUnexpectedResponseCode) error
+}
+
+// Err405er is the interface resource error types implement to override the error message
+// from a 405 error.
+type Err405er interface {
+	Error405(ErrUnexpectedResponseCode) error
+}
+
+// Err408er is the interface resource error types implement to override the error message
+// from a 408 error.
+type Err408er interface {
+	Error408(ErrUnexpectedResponseCode) error
+}
+
+// Err429er is the interface resource error types implement to override the error message
+// from a 429 error.
+type Err429er interface {
+	Error429(ErrUnexpectedResponseCode) error
+}
+
+// Err500er is the interface resource error types implement to override the error message
+// from a 500 error.
+type Err500er interface {
+	Error500(ErrUnexpectedResponseCode) error
+}
+
+// Err503er is the interface resource error types implement to override the error message
+// from a 503 error.
+type Err503er interface {
+	Error503(ErrUnexpectedResponseCode) error
+}
+
+// ErrTimeOut is the error type returned when an operation times out.
+type ErrTimeOut struct {
+	BaseError
+}
+
+func (e ErrTimeOut) Error() string {
+	e.DefaultErrString = "A time out occurred"
+	return e.choseErrString()
+}
+
+// ErrUnableToReauthenticate is the error type returned when reauthentication fails.
+type ErrUnableToReauthenticate struct {
+	BaseError
+	ErrOriginal error
+}
+
+func (e ErrUnableToReauthenticate) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal)
+	return e.choseErrString()
+}
+
+// ErrErrorAfterReauthentication is the error type returned when reauthentication
+// succeeds, but an error occurs afterward (usually an HTTP error).
+type ErrErrorAfterReauthentication struct {
+	BaseError
+	ErrOriginal error
+}
+
+func (e ErrErrorAfterReauthentication) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal)
+	return e.choseErrString()
+}
+
+// ErrServiceNotFound is returned when no service in a service catalog matches
+// the provided EndpointOpts. This is generally returned by provider service
+// factory methods like "NewComputeV2()" and can mean that a service is not
+// enabled for your account.
+type ErrServiceNotFound struct {
+	BaseError
+}
+
+func (e ErrServiceNotFound) Error() string {
+	e.DefaultErrString = "No suitable service could be found in the service catalog."
+	return e.choseErrString()
+}
+
+// ErrEndpointNotFound is returned when no available endpoints match the
+// provided EndpointOpts. This is also generally returned by provider service
+// factory methods, and usually indicates that a region was specified
+// incorrectly.
+type ErrEndpointNotFound struct {
+	BaseError
+}
+
+func (e ErrEndpointNotFound) Error() string {
+	e.DefaultErrString = "No suitable endpoint could be found in the service catalog."
+	return e.choseErrString()
+}
+
+// ErrResourceNotFound is the error when trying to retrieve a resource's
+// ID by name and the resource doesn't exist.
+type ErrResourceNotFound struct {
+	BaseError
+	Name         string
+	ResourceType string
+}
+
+func (e ErrResourceNotFound) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name)
+	return e.choseErrString()
+}
+
+// ErrMultipleResourcesFound is the error when trying to retrieve a resource's
+// ID by name and multiple resources have the user-provided name.
+type ErrMultipleResourcesFound struct {
+	BaseError
+	Name         string
+	Count        int
+	ResourceType string
+}
+
+func (e ErrMultipleResourcesFound) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name)
+	return e.choseErrString()
+}
+
+// ErrUnexpectedType is the error when an unexpected type is encountered.
+type ErrUnexpectedType struct {
+	BaseError
+	Expected string
+	Actual   string
+}
+
+func (e ErrUnexpectedType) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual)
+	return e.choseErrString()
+}
+
+func unacceptedAttributeErr(attribute string) string {
+	return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute)
+}
+
+func redundantWithTokenErr(attribute string) string {
+	return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute)
+}
+
+func redundantWithUserID(attribute string) string {
+	return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute)
+}
+
+// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used.
+type ErrAPIKeyProvided struct{ BaseError }
+
+func (e ErrAPIKeyProvided) Error() string {
+	return unacceptedAttributeErr("APIKey")
+}
+
+// ErrTenantIDProvided indicates that a TenantID was provided but can't be used.
+type ErrTenantIDProvided struct{ BaseError }
+
+func (e ErrTenantIDProvided) Error() string {
+	return unacceptedAttributeErr("TenantID")
+}
+
+// ErrTenantNameProvided indicates that a TenantName was provided but can't be used.
+type ErrTenantNameProvided struct{ BaseError }
+
+func (e ErrTenantNameProvided) Error() string {
+	return unacceptedAttributeErr("TenantName")
+}
+
+// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead.
+type ErrUsernameWithToken struct{ BaseError }
+
+func (e ErrUsernameWithToken) Error() string {
+	return redundantWithTokenErr("Username")
+}
+
+// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead.
+type ErrUserIDWithToken struct{ BaseError }
+
+func (e ErrUserIDWithToken) Error() string {
+	return redundantWithTokenErr("UserID")
+}
+
+// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead.
+type ErrDomainIDWithToken struct{ BaseError }
+
+func (e ErrDomainIDWithToken) Error() string {
+	return redundantWithTokenErr("DomainID")
+}
+
+// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.
+type ErrDomainNameWithToken struct{ BaseError }
+
+func (e ErrDomainNameWithToken) Error() string {
+	return redundantWithTokenErr("DomainName")
+}
+
+// ErrUsernameOrUserID indicates that neither Username nor UserID was specified, or that both were specified at once.
+type ErrUsernameOrUserID struct{ BaseError }
+
+func (e ErrUsernameOrUserID) Error() string {
+	return "Exactly one of Username and UserID must be provided for password authentication"
+}
+
+// ErrDomainIDWithUserID indicates that a DomainID was provided, but is unnecessary because a UserID is being used.
+type ErrDomainIDWithUserID struct{ BaseError }
+
+func (e ErrDomainIDWithUserID) Error() string {
+	return redundantWithUserID("DomainID")
+}
+
+// ErrDomainNameWithUserID indicates that a DomainName was provided, but is unnecessary because a UserID is being used.
+type ErrDomainNameWithUserID struct{ BaseError }
+
+func (e ErrDomainNameWithUserID) Error() string {
+	return redundantWithUserID("DomainName")
+}
+
+// ErrDomainIDOrDomainName indicates that a Username was provided but no domain to scope it to.
+// It may also indicate that both a DomainID and a DomainName were provided at once.
+type ErrDomainIDOrDomainName struct{ BaseError }
+
+func (e ErrDomainIDOrDomainName) Error() string {
+	return "You must provide exactly one of DomainID or DomainName to authenticate by Username"
+}
+
+// ErrMissingPassword indicates that no password was provided and no token is available.
+type ErrMissingPassword struct{ BaseError }
+
+func (e ErrMissingPassword) Error() string {
+	return "You must provide a password to authenticate"
+}
+
+// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present.
+type ErrScopeDomainIDOrDomainName struct{ BaseError }
+
+func (e ErrScopeDomainIDOrDomainName) Error() string {
+	return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName"
+}
+
+// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope.
+type ErrScopeProjectIDOrProjectName struct{ BaseError }
+
+func (e ErrScopeProjectIDOrProjectName) Error() string {
+	return "You must provide at most one of ProjectID or ProjectName in a Scope"
+}
+
+// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope.
+type ErrScopeProjectIDAlone struct{ BaseError }
+
+func (e ErrScopeProjectIDAlone) Error() string {
+	return "ProjectID must be supplied alone in a Scope"
+}
+
+// ErrScopeEmpty indicates that no credentials were provided in a Scope.
+type ErrScopeEmpty struct{ BaseError }
+
+func (e ErrScopeEmpty) Error() string {
+	return "You must provide either a Project or Domain in a Scope"
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/internal/pkg.go b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go
new file mode 100644
index 00000000000..5bf0569ce8c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go
@@ -0,0 +1 @@
+package internal
diff --git a/vendor/github.com/gophercloud/gophercloud/internal/util.go b/vendor/github.com/gophercloud/gophercloud/internal/util.go
new file mode 100644
index 00000000000..8efb283e729
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/internal/util.go
@@ -0,0 +1,34 @@
+package internal
+
+import (
+	"reflect"
+	"strings"
+)
+
+// RemainingKeys will inspect a struct and compare it to a map. Any key in the
+// map that does not match either a struct field's JSON tag or its lower-cased
+// field name will be returned as an extra.
+//
+// This is useful for determining the extra fields returned in response bodies
+// for resources that can contain an arbitrary or dynamic number of fields.
+func RemainingKeys(s interface{}, m map[string]interface{}) (extras map[string]interface{}) {
+	extras = make(map[string]interface{})
+	for k, v := range m {
+		extras[k] = v
+	}
+
+	valueOf := reflect.ValueOf(s)
+	typeOf := reflect.TypeOf(s)
+	for i := 0; i < valueOf.NumField(); i++ {
+		field := typeOf.Field(i)
+
+		lowerField := strings.ToLower(field.Name)
+		delete(extras, lowerField)
+
+		if tagValue := field.Tag.Get("json"); tagValue != "" && tagValue != "-" {
+			delete(extras, tagValue)
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
new file mode 100644
index 00000000000..95286041d66
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
@@ -0,0 +1,64 @@
+package openstack
+
+import (
+	"os"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+var nilOptions = gophercloud.AuthOptions{}
+
+/*
+AuthOptionsFromEnv fills out a gophercloud.AuthOptions structure with the
+settings found on the various OpenStack OS_* environment variables.
+
+The following variables are read: OS_AUTH_URL, OS_USERNAME, OS_USERID,
+OS_PASSWORD, OS_TENANT_ID, OS_TENANT_NAME, OS_DOMAIN_ID, and OS_DOMAIN_NAME.
+
+Of these, OS_AUTH_URL, OS_PASSWORD, and at least one of OS_USERNAME or
+OS_USERID must be set, or an error will result. The remaining variables are
+optional.
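+
+For reference, a minimal `openrc` file might contain (values are illustrative
+only):
+
+	export OS_AUTH_URL=https://openstack.example.com:5000/v2.0
+	export OS_USERNAME=demo
+	export OS_PASSWORD=secret
+	export OS_TENANT_NAME=demo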
+ +To use this function, first set the OS_* environment variables (for example, +by sourcing an `openrc` file), then: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := os.Getenv("OS_AUTH_URL") + username := os.Getenv("OS_USERNAME") + userID := os.Getenv("OS_USERID") + password := os.Getenv("OS_PASSWORD") + tenantID := os.Getenv("OS_TENANT_ID") + tenantName := os.Getenv("OS_TENANT_NAME") + domainID := os.Getenv("OS_DOMAIN_ID") + domainName := os.Getenv("OS_DOMAIN_NAME") + + if authURL == "" { + err := gophercloud.ErrMissingInput{Argument: "authURL"} + return nilOptions, err + } + + if username == "" && userID == "" { + err := gophercloud.ErrMissingInput{Argument: "username"} + return nilOptions, err + } + + if password == "" { + err := gophercloud.ErrMissingInput{Argument: "password"} + return nilOptions, err + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + } + + return ao, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go new file mode 100644 index 00000000000..a78d3d0482c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go @@ -0,0 +1,86 @@ +/* +Package volumeactions provides information and interaction with volumes in the +OpenStack Block Storage service. A volume is a detachable block storage +device, akin to a USB hard drive. 
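+
+The examples below assume an authenticated Block Storage service client named
+"client" and an existing volume "volume". As a sketch (the region name is
+illustrative), such a client can be created with:
+
+	client, err := openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{
+		Region: "RegionOne",
+	})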
+
+Example of Attaching a Volume to an Instance
+
+	attachOpts := volumeactions.AttachOpts{
+		MountPoint:   "/mnt",
+		Mode:         "rw",
+		InstanceUUID: server.ID,
+	}
+
+	err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+	detachOpts := volumeactions.DetachOpts{
+		AttachmentID: volume.Attachments[0].AttachmentID,
+	}
+
+	err = volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+
+Example of Creating an Image from a Volume
+
+	uploadImageOpts := volumeactions.UploadImageOpts{
+		ImageName: "my_vol",
+		Force:     true,
+	}
+
+	volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", volumeImage)
+
+Example of Extending a Volume's Size
+
+	extendOpts := volumeactions.ExtendSizeOpts{
+		NewSize: 100,
+	}
+
+	err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example of Initializing a Volume Connection
+
+	connectOpts := &volumeactions.InitializeConnectionOpts{
+		IP:        "127.0.0.1",
+		Host:      "stack",
+		Initiator: "iqn.1994-05.com.redhat:17cf566367d2",
+		Multipath: gophercloud.Disabled,
+		Platform:  "x86_64",
+		OSType:    "linux2",
+	}
+
+	connectionInfo, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", connectionInfo["data"])
+
+	terminateOpts := &volumeactions.InitializeConnectionOpts{
+		IP:        "127.0.0.1",
+		Host:      "stack",
+		Initiator: "iqn.1994-05.com.redhat:17cf566367d2",
+		Multipath: gophercloud.Disabled,
+		Platform:  "x86_64",
+		OSType:    "linux2",
+	}
+
+	err = volumeactions.TerminateConnection(client, volume.ID, terminateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package volumeactions
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go
new file mode 100644
index 00000000000..a3916c77c16
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go
@@ -0,0 +1,263 @@
+package volumeactions
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+// AttachOptsBuilder allows extensions to add additional parameters to the
+// Attach request.
+type AttachOptsBuilder interface {
+	ToVolumeAttachMap() (map[string]interface{}, error)
+}
+
+// AttachMode describes the attachment mode for volumes.
+type AttachMode string
+
+// These constants determine how a volume is attached.
+const (
+	ReadOnly  AttachMode = "ro"
+	ReadWrite AttachMode = "rw"
+)
+
+// AttachOpts contains options for attaching a Volume.
+type AttachOpts struct {
+	// The mountpoint of this volume.
+	MountPoint string `json:"mountpoint,omitempty"`
+
+	// The nova instance ID; cannot be set simultaneously with HostName.
+	InstanceUUID string `json:"instance_uuid,omitempty"`
+
+	// The hostname of the baremetal host; cannot be set simultaneously with
+	// InstanceUUID.
+	HostName string `json:"host_name,omitempty"`
+
+	// Mount mode of this volume.
+	Mode AttachMode `json:"mode,omitempty"`
+}
+
+// ToVolumeAttachMap assembles a request body based on the contents of an
+// AttachOpts.
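+//
+// For example, AttachOpts{InstanceUUID: "{serverId}", MountPoint: "/mnt", Mode: "rw"}
+// produces a body shaped like (a sketch):
+//
+//	{"os-attach": {"instance_uuid": "{serverId}", "mountpoint": "/mnt", "mode": "rw"}}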
+func (opts AttachOpts) ToVolumeAttachMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "os-attach")
+}
+
+// Attach will attach a volume based on the values in AttachOpts.
+func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder) (r AttachResult) {
+	b, err := opts.ToVolumeAttachMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(attachURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// BeginDetaching will mark the volume as detaching.
+func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) {
+	b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})}
+	_, r.Err = client.Post(beginDetachingURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// DetachOptsBuilder allows extensions to add additional parameters to the
+// Detach request.
+type DetachOptsBuilder interface {
+	ToVolumeDetachMap() (map[string]interface{}, error)
+}
+
+// DetachOpts contains options for detaching a Volume.
+type DetachOpts struct {
+	// AttachmentID is the ID of the attachment between a volume and instance.
+	AttachmentID string `json:"attachment_id,omitempty"`
+}
+
+// ToVolumeDetachMap assembles a request body based on the contents of a
+// DetachOpts.
+func (opts DetachOpts) ToVolumeDetachMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "os-detach")
+}
+
+// Detach will detach a volume based on volume ID.
+func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder) (r DetachResult) {
+	b, err := opts.ToVolumeDetachMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(detachURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// Reserve will reserve a volume based on volume ID.
+func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) {
+	b := map[string]interface{}{"os-reserve": make(map[string]interface{})}
+	_, r.Err = client.Post(reserveURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// Unreserve will unreserve a volume based on volume ID.
+func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) {
+	b := map[string]interface{}{"os-unreserve": make(map[string]interface{})}
+	_, r.Err = client.Post(unreserveURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// InitializeConnectionOptsBuilder allows extensions to add additional parameters to the
+// InitializeConnection request.
+type InitializeConnectionOptsBuilder interface {
+	ToVolumeInitializeConnectionMap() (map[string]interface{}, error)
+}
+
+// InitializeConnectionOpts hosts options for InitializeConnection.
+// The fields are specific to the storage driver in use and the destination
+// attachment.
+type InitializeConnectionOpts struct {
+	IP        string   `json:"ip,omitempty"`
+	Host      string   `json:"host,omitempty"`
+	Initiator string   `json:"initiator,omitempty"`
+	Wwpns     []string `json:"wwpns,omitempty"`
+	Wwnns     string   `json:"wwnns,omitempty"`
+	Multipath *bool    `json:"multipath,omitempty"`
+	Platform  string   `json:"platform,omitempty"`
+	OSType    string   `json:"os_type,omitempty"`
+}
+
+// ToVolumeInitializeConnectionMap assembles a request body based on the contents of an
+// InitializeConnectionOpts.
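+//
+// The connector options are nested under two keys, producing a body shaped
+// like (a sketch, with illustrative values):
+//
+//	{"os-initialize_connection": {"connector": {"ip": "127.0.0.1", "host": "stack"}}}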
+func (opts InitializeConnectionOpts) ToVolumeInitializeConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-initialize_connection": b}, err +} + +// InitializeConnection initializes an iSCSI connection by volume ID. +func InitializeConnection(client *gophercloud.ServiceClient, id string, opts InitializeConnectionOptsBuilder) (r InitializeConnectionResult) { + b, err := opts.ToVolumeInitializeConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(initializeConnectionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// TerminateConnectionOptsBuilder allows extensions to add additional parameters to the +// TerminateConnection request. +type TerminateConnectionOptsBuilder interface { + ToVolumeTerminateConnectionMap() (map[string]interface{}, error) +} + +// TerminateConnectionOpts hosts options for TerminateConnection. +type TerminateConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeTerminateConnectionMap assembles a request body based on the contents of a +// TerminateConnectionOpts. +func (opts TerminateConnectionOpts) ToVolumeTerminateConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-terminate_connection": b}, err +} + +// TerminateConnection terminates an iSCSI connection by volume ID. +func TerminateConnection(client *gophercloud.ServiceClient, id string, opts TerminateConnectionOptsBuilder) (r TerminateConnectionResult) { + b, err := opts.ToVolumeTerminateConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(teminateConnectionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// ExtendSizeOptsBuilder allows extensions to add additional parameters to the +// ExtendSize request. +type ExtendSizeOptsBuilder interface { + ToVolumeExtendSizeMap() (map[string]interface{}, error) +} + +// ExtendSizeOpts contains options for extending the size of an existing Volume. +// This object is passed to the volumes.ExtendSize function. +type ExtendSizeOpts struct { + // NewSize is the new size of the volume, in GB. + NewSize int `json:"new_size" required:"true"` +} + +// ToVolumeExtendSizeMap assembles a request body based on the contents of an +// ExtendSizeOpts. +func (opts ExtendSizeOpts) ToVolumeExtendSizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-extend") +} + +// ExtendSize will extend the size of the volume based on the provided information. +// This operation does not return a response body. +func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOptsBuilder) (r ExtendSizeResult) { + b, err := opts.ToVolumeExtendSizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(extendSizeURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// UploadImageOptsBuilder allows extensions to add additional parameters to the +// UploadImage request. 
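+//
+// As a sketch of how the builder can be used (the extra key and type are
+// illustrative, not part of this package), a caller may embed UploadImageOpts
+// and add driver- or version-specific fields to the generated body:
+//
+//	type uploadWithVisibilityOpts struct {
+//		volumeactions.UploadImageOpts
+//		Visibility string
+//	}
+//
+//	func (o uploadWithVisibilityOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) {
+//		b, err := o.UploadImageOpts.ToVolumeUploadImageMap()
+//		if err != nil {
+//			return nil, err
+//		}
+//		b["os-volume_upload_image"].(map[string]interface{})["visibility"] = o.Visibility
+//		return b, nil
+//	}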
+type UploadImageOptsBuilder interface {
+	ToVolumeUploadImageMap() (map[string]interface{}, error)
+}
+
+// UploadImageOpts contains options for uploading a Volume to image storage.
+type UploadImageOpts struct {
+	// Container format, may be bare, ovf, ova, etc.
+	ContainerFormat string `json:"container_format,omitempty"`
+
+	// Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc.
+	DiskFormat string `json:"disk_format,omitempty"`
+
+	// The name of the image that will be stored in glance.
+	ImageName string `json:"image_name,omitempty"`
+
+	// Force image creation; usable if the volume is attached to an instance.
+	Force bool `json:"force,omitempty"`
+}
+
+// ToVolumeUploadImageMap assembles a request body based on the contents of a
+// UploadImageOpts.
+func (opts UploadImageOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "os-volume_upload_image")
+}
+
+// UploadImage will upload an image based on the values in UploadImageOptsBuilder.
+func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageOptsBuilder) (r UploadImageResult) {
+	b, err := opts.ToVolumeUploadImageMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(uploadURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go
new file mode 100644
index 00000000000..9815f0c26ac
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go
@@ -0,0 +1,186 @@
+package volumeactions
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// AttachResult contains the response body and error from an Attach request.
+type AttachResult struct {
+	gophercloud.ErrResult
+}
+
+// BeginDetachingResult contains the response body and error from a BeginDetaching
+// request.
+type BeginDetachingResult struct {
+	gophercloud.ErrResult
+}
+
+// DetachResult contains the response body and error from a Detach request.
+type DetachResult struct {
+	gophercloud.ErrResult
+}
+
+// UploadImageResult contains the response body and error from an UploadImage
+// request.
+type UploadImageResult struct {
+	gophercloud.Result
+}
+
+// ReserveResult contains the response body and error from a Reserve request.
+type ReserveResult struct {
+	gophercloud.ErrResult
+}
+
+// UnreserveResult contains the response body and error from an Unreserve
+// request.
+type UnreserveResult struct {
+	gophercloud.ErrResult
+}
+
+// TerminateConnectionResult contains the response body and error from a
+// TerminateConnection request.
+type TerminateConnectionResult struct {
+	gophercloud.ErrResult
+}
+
+// InitializeConnectionResult contains the response body and error from an
+// InitializeConnection request.
+type InitializeConnectionResult struct {
+	gophercloud.Result
+}
+
+// ExtendSizeResult contains the response body and error from an ExtendSize request.
+type ExtendSizeResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract will get the connection information out of the
+// InitializeConnectionResult object.
+//
+// This will be a generic map[string]interface{} and the results will be
+// dependent on the type of connection made.
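+//
+// Usage sketch (client, volume, and connectOpts are assumed; the "data" key
+// layout depends on the backend driver):
+//
+//	info, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%+v\n", info["data"])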
+func (r InitializeConnectionResult) Extract() (map[string]interface{}, error) {
+	var s struct {
+		ConnectionInfo map[string]interface{} `json:"connection_info"`
+	}
+	err := r.ExtractInto(&s)
+	return s.ConnectionInfo, err
+}
+
+// ImageVolumeType contains volume type information obtained from UploadImage
+// action.
+type ImageVolumeType struct {
+	// The ID of a volume type.
+	ID string `json:"id"`
+
+	// Human-readable display name for the volume type.
+	Name string `json:"name"`
+
+	// Human-readable description for the volume type.
+	Description string `json:"display_description"`
+
+	// Flag for public access.
+	IsPublic bool `json:"is_public"`
+
+	// Extra specifications for volume type.
+	ExtraSpecs map[string]interface{} `json:"extra_specs"`
+
+	// ID of quality of service specs.
+	QosSpecsID string `json:"qos_specs_id"`
+
+	// Flag for deletion status of volume type.
+	Deleted bool `json:"deleted"`
+
+	// The date when volume type was deleted.
+	DeletedAt time.Time `json:"-"`
+
+	// The date when volume type was created.
+	CreatedAt time.Time `json:"-"`
+
+	// The date when this volume was last updated.
+	UpdatedAt time.Time `json:"-"`
+}
+
+func (r *ImageVolumeType) UnmarshalJSON(b []byte) error {
+	type tmp ImageVolumeType
+	var s struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"`
+		DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = ImageVolumeType(s.tmp)
+
+	r.CreatedAt = time.Time(s.CreatedAt)
+	r.UpdatedAt = time.Time(s.UpdatedAt)
+	r.DeletedAt = time.Time(s.DeletedAt)
+
+	return err
+}
+
+// VolumeImage contains information about a volume uploaded to an image service.
+type VolumeImage struct {
+	// The ID of a volume an image is created from.
+	VolumeID string `json:"id"`
+
+	// Container format, may be bare, ovf, ova, etc.
+	ContainerFormat string `json:"container_format"`
+
+	// Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc.
+	DiskFormat string `json:"disk_format"`
+
+	// Human-readable description for the volume.
+	Description string `json:"display_description"`
+
+	// The ID of the created image.
+	ImageID string `json:"image_id"`
+
+	// Human-readable display name for the image.
+	ImageName string `json:"image_name"`
+
+	// Size of the volume in GB.
+	Size int `json:"size"`
+
+	// Current status of the volume.
+	Status string `json:"status"`
+
+	// The date when this volume was last updated.
+	UpdatedAt time.Time `json:"-"`
+
+	// Volume type object of used volume.
+	VolumeType ImageVolumeType `json:"volume_type"`
+}
+
+func (r *VolumeImage) UnmarshalJSON(b []byte) error {
+	type tmp VolumeImage
+	var s struct {
+		tmp
+		UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = VolumeImage(s.tmp)
+
+	r.UpdatedAt = time.Time(s.UpdatedAt)
+
+	return err
+}
+
+// Extract will get an object with info about the uploaded image out of the
+// UploadImageResult object.
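+//
+// Usage sketch (client, volume, and uploadImageOpts are assumed):
+//
+//	volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract()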
+func (r UploadImageResult) Extract() (VolumeImage, error) { + var s struct { + VolumeImage VolumeImage `json:"os-volume_upload_image"` + } + err := r.ExtractInto(&s) + return s.VolumeImage, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go new file mode 100644 index 00000000000..5efd2b25c05 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go @@ -0,0 +1,39 @@ +package volumeactions + +import "github.com/gophercloud/gophercloud" + +func attachURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id, "action") +} + +func beginDetachingURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func detachURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func uploadURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func reserveURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func unreserveURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func initializeConnectionURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func teminateConnectionURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func extendSizeURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go new file mode 100644 index 00000000000..307b8b12d2f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go new file mode 100644 index 00000000000..566def51811 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go @@ -0,0 +1,167 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. 
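+//
+// A usage sketch (client is an assumed Block Storage v1 service client; in
+// v1, Name maps to the display_name attribute):
+//
+//	vol, err := volumes.Create(client, volumes.CreateOpts{
+//		Size: 10,
+//		Name: "vol-001",
+//	}).Extract()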
+type CreateOpts struct { + Size int `json:"size" required:"true"` + AvailabilityZone string `json:"availability_zone,omitempty"` + Description string `json:"display_description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + Name string `json:"display_name,omitempty"` + SnapshotID string `json:"snapshot_id,omitempty"` + SourceVolID string `json:"source_volid,omitempty"` + ImageID string `json:"imageRef,omitempty"` + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeListQuery() (string, error) +} + +// ListOpts holds options for listing Volumes. It is passed to the volumes.List +// function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant volumes. + AllTenants bool `q:"all_tenants"` + // List only volumes that contain Metadata. + Metadata map[string]string `q:"metadata"` + // List only volumes that have Name as the display name. + Name string `q:"display_name"` + // List only volumes that have a status of Status. + Status string `q:"status"` +} + +// ToVolumeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToVolumeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VolumePage{pagination.SinglePageBase(r)} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVolumeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object. 
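+//
+// A usage sketch (client and the volume ID are assumed):
+//
+//	vol, err := volumes.Update(client, "{volumeId}", volumes.UpdateOpts{
+//		Name: "new-name",
+//	}).Extract()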
+type UpdateOpts struct {
+	Name        string            `json:"display_name,omitempty"`
+	Description string            `json:"display_description,omitempty"`
+	Metadata    map[string]string `json:"metadata,omitempty"`
+}
+
+// ToVolumeUpdateMap assembles a request body based on the contents of an
+// UpdateOpts.
+func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Update will update the Volume with provided information. To extract the updated
+// Volume from the response, call the Extract method on the UpdateResult.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVolumeUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a volume's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+	pages, err := List(client, nil).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractVolumes(pages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, s := range all {
+		if s.Name == name {
+			count++
+			id = s.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"}
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go
new file mode 100644
index 00000000000..7f68d148639
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go
@@ -0,0 +1,109 @@
+package volumes
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Volume contains all the information associated with an OpenStack Volume.
+type Volume struct {
+	// Current status of the volume.
+	Status string `json:"status"`
+	// Human-readable display name for the volume.
+	Name string `json:"display_name"`
+	// Instances onto which the volume is attached.
+	Attachments []map[string]interface{} `json:"attachments"`
+	// This parameter is no longer used.
+	AvailabilityZone string `json:"availability_zone"`
+	// Indicates whether this is a bootable volume.
+	Bootable string `json:"bootable"`
+	// The date when this volume was created.
+	CreatedAt time.Time `json:"-"`
+	// Human-readable description for the volume.
+	Description string `json:"display_description"`
+	// The type of volume to create, either SATA or SSD.
+	VolumeType string `json:"volume_type"`
+	// The ID of the snapshot from which the volume was created.
+	SnapshotID string `json:"snapshot_id"`
+	// The ID of another block storage volume from which the current volume was created.
+	SourceVolID string `json:"source_volid"`
+	// Arbitrary key-value pairs defined by the user.
+	Metadata map[string]string `json:"metadata"`
+	// Unique identifier for the volume.
+	ID string `json:"id"`
+	// Size of the volume in GB.
+	Size int `json:"size"`
+}
+
+func (r *Volume) UnmarshalJSON(b []byte) error {
+	type tmp Volume
+	var s struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Volume(s.tmp)
+
+	r.CreatedAt = time.Time(s.CreatedAt)
+
+	return err
+}
+
+// CreateResult contains the response body and error from a Create request.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult contains the response body and error from a Get request.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult contains the response body and error from a Delete request.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// VolumePage is a pagination.Page that is returned from a call to the List function.
+type VolumePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a VolumePage contains no Volumes.
+func (r VolumePage) IsEmpty() (bool, error) {
+	volumes, err := ExtractVolumes(r)
+	return len(volumes) == 0, err
+}
+
+// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call.
+func ExtractVolumes(r pagination.Page) ([]Volume, error) {
+	var s struct {
+		Volumes []Volume `json:"volumes"`
+	}
+	err := (r.(VolumePage)).ExtractInto(&s)
+	return s.Volumes, err
+}
+
+// UpdateResult contains the response body and error from an Update request.
+type UpdateResult struct {
+	commonResult
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract will get the Volume object out of the commonResult object.
+func (r commonResult) Extract() (*Volume, error) {
+	var s struct {
+		Volume *Volume `json:"volume"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Volume, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go
new file mode 100644
index 00000000000..8a00f97e98c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go
@@ -0,0 +1,23 @@
+package volumes
+
+import "github.com/gophercloud/gophercloud"
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("volumes")
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return createURL(c)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("volumes", id)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return deleteURL(c, id)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return deleteURL(c, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go
new file mode 100644
index 00000000000..e86c1b4b4ee
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go
@@ -0,0 +1,22 @@
+package volumes
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+// WaitForStatus will continually poll the resource, checking for a particular
+// status. It will do this for up to the number of seconds given.
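+//
+// For example, to wait up to 60 seconds for a volume to become available
+// (a sketch; client and volume are assumed):
+//
+//	err := volumes.WaitForStatus(client, volume.ID, "available", 60)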
+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go new file mode 100644 index 00000000000..307b8b12d2f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go new file mode 100644 index 00000000000..18c9cb272ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go @@ -0,0 +1,182 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + // The size of the volume, in GB + Size int `json:"size" required:"true"` + // The availability zone + AvailabilityZone string `json:"availability_zone,omitempty"` + // ConsistencyGroupID is the ID of a consistency group + ConsistencyGroupID string `json:"consistencygroup_id,omitempty"` + // The volume description + Description string `json:"description,omitempty"` + // One or more metadata key and value pairs to associate with the volume + Metadata map[string]string `json:"metadata,omitempty"` + // The volume name + Name string `json:"name,omitempty"` + // the ID of the existing volume snapshot + SnapshotID string `json:"snapshot_id,omitempty"` + // SourceReplica is a UUID of an existing volume to replicate with + SourceReplica string `json:"source_replica,omitempty"` + // the ID of the existing volume + SourceVolID string `json:"source_volid,omitempty"` + // The ID of the image from which you want to create the volume. + // Required to create a bootable volume. + ImageID string `json:"imageRef,omitempty"` + // The associated volume type + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. 
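+//
+// For example, to create a bootable volume from an image (a sketch; client
+// and the image ID are assumed):
+//
+//	vol, err := volumes.Create(client, volumes.CreateOpts{
+//		Size:    20,
+//		Name:    "bootable-vol",
+//		ImageID: "{imageId}",
+//	}).Extract()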
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToVolumeCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// Delete will delete the existing Volume with the provided ID.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get retrieves the Volume with the provided ID. To extract the Volume object
+// from the response, call the Extract method on the GetResult.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToVolumeListQuery() (string, error)
+}
+
+// ListOpts holds options for listing Volumes. It is passed to the volumes.List
+// function.
+type ListOpts struct {
+	// AllTenants is an admin-only option. Set it to true to see all tenant volumes.
+	AllTenants bool `q:"all_tenants"`
+	// List only volumes that contain Metadata.
+	Metadata map[string]string `q:"metadata"`
+	// List only volumes that have Name as the display name.
+	Name string `q:"name"`
+	// List only volumes that have a status of Status.
+	Status string `q:"status"`
+}
+
+// ToVolumeListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToVolumeListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns Volumes optionally limited by the conditions provided in ListOpts.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToVolumeListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return VolumePage{pagination.SinglePageBase(r)}
+	})
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToVolumeUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains options for updating an existing Volume. This object is passed
+// to the volumes.Update function. For more information about the parameters, see
+// the Volume object.
+type UpdateOpts struct {
+	Name        string            `json:"name,omitempty"`
+	Description string            `json:"description,omitempty"`
+	Metadata    map[string]string `json:"metadata,omitempty"`
+}
+
+// ToVolumeUpdateMap assembles a request body based on the contents of an
+// UpdateOpts.
+func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Update will update the Volume with provided information. To extract the updated
+// Volume from the response, call the Extract method on the UpdateResult.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVolumeUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a volume's ID given its name.
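+//
+// A minimal usage sketch; blockStorageClient is a hypothetical placeholder:
+//
+//	id, err := volumes.IDFromName(blockStorageClient, "example-volume")
+//	if err != nil {
+//		// Zero or multiple volumes matched the name.
+//		panic(err)
+//	}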
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + pages, err := List(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go new file mode 100644 index 00000000000..674ec346865 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go @@ -0,0 +1,154 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type Attachment struct { + AttachedAt time.Time `json:"-"` + AttachmentID string `json:"attachment_id"` + Device string `json:"device"` + HostName string `json:"host_name"` + ID string `json:"id"` + ServerID string `json:"server_id"` + VolumeID string `json:"volume_id"` +} + +func (r *Attachment) UnmarshalJSON(b []byte) error { + type tmp Attachment + var s struct { + tmp + AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Attachment(s.tmp) + + r.AttachedAt = time.Time(s.AttachedAt) + + return err +} + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Unique identifier for the volume. + ID string `json:"id"` + // Current status of the volume. + Status string `json:"status"` + // Size of the volume in GB. + Size int `json:"size"` + // AvailabilityZone is which availability zone the volume is in. + AvailabilityZone string `json:"availability_zone"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // The date when this volume was last updated + UpdatedAt time.Time `json:"-"` + // Instances onto which the volume is attached. + Attachments []Attachment `json:"attachments"` + // Human-readable display name for the volume. + Name string `json:"name"` + // Human-readable description for the volume. + Description string `json:"description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // UserID is the id of the user who created the volume. + UserID string `json:"user_id"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // Encrypted denotes if the volume is encrypted. + Encrypted bool `json:"encrypted"` + // ReplicationStatus is the status of replication. + ReplicationStatus string `json:"replication_status"` + // ConsistencyGroupID is the consistency group ID. + ConsistencyGroupID string `json:"consistencygroup_id"` + // Multiattach denotes if the volume is multi-attach capable. 
+	Multiattach bool `json:"multiattach"`
+}
+
+func (r *Volume) UnmarshalJSON(b []byte) error {
+	type tmp Volume
+	var s struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Volume(s.tmp)
+
+	r.CreatedAt = time.Time(s.CreatedAt)
+	r.UpdatedAt = time.Time(s.UpdatedAt)
+
+	return err
+}
+
+// VolumePage is a pagination.Pager that is returned from a call to the List function.
+type VolumePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a VolumePage contains no Volumes.
+func (r VolumePage) IsEmpty() (bool, error) {
+	volumes, err := ExtractVolumes(r)
+	return len(volumes) == 0, err
+}
+
+// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call.
+func ExtractVolumes(r pagination.Page) ([]Volume, error) {
+	var s []Volume
+	err := ExtractVolumesInto(r, &s)
+	return s, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract will get the Volume object out of the commonResult object.
+func (r commonResult) Extract() (*Volume, error) {
+	var s Volume
+	err := r.ExtractInto(&s)
+	return &s, err
+}
+
+func (r commonResult) ExtractInto(v interface{}) error {
+	return r.Result.ExtractIntoStructPtr(v, "volume")
+}
+
+// ExtractVolumesInto extracts a page of Volumes into the given slice pointer.
+func ExtractVolumesInto(r pagination.Page, v interface{}) error {
+	return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes")
+}
+
+// CreateResult contains the response body and error from a Create request.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult contains the response body and error from a Get request.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult contains the response body and error from an Update request.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult contains the response body and error from a Delete request.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go
new file mode 100644
index 00000000000..170724905ab
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go
@@ -0,0 +1,23 @@
+package volumes
+
+import "github.com/gophercloud/gophercloud"
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("volumes")
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("volumes", "detail")
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("volumes", id)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return deleteURL(c, id)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return deleteURL(c, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go
new file mode 100644
index 00000000000..e86c1b4b4ee
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go
@@ -0,0 +1,22 @@
+package volumes
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+// WaitForStatus will continually poll the resource, checking for a particular
+// status. It will do this for at most the given number of seconds.
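+//
+// A sketch pairing this with Create; blockStorageClient is a hypothetical
+// placeholder:
+//
+//	vol, err := volumes.Create(blockStorageClient, volumes.CreateOpts{Size: 10}).Extract()
+//	if err == nil {
+//		err = volumes.WaitForStatus(blockStorageClient, vol.ID, "available", 120)
+//	}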
+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
+	return gophercloud.WaitFor(secs, func() (bool, error) {
+		current, err := Get(c, id).Extract()
+		if err != nil {
+			return false, err
+		}
+
+		if current.Status == status {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go
new file mode 100644
index 00000000000..77214caca3b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go
@@ -0,0 +1,348 @@
+package openstack
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
+	tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
+	"github.com/gophercloud/gophercloud/openstack/utils"
+)
+
+const (
+	// v2 represents Keystone v2.
+	// It should never increase beyond 2.0.
+	v2 = "v2.0"
+
+	// v3 represents Keystone v3.
+	// The version can be anything from v3 to v3.x.
+	v3 = "v3"
+)
+
+/*
+NewClient prepares an unauthenticated ProviderClient instance.
+Most users will probably prefer using the AuthenticatedClient function
+instead.
+
+This is useful if you wish to explicitly control the version of the identity
+service that's used for authentication, for example.
+
+A basic example of using this would be:
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.NewClient(ao.IdentityEndpoint)
+	client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
+*/
+func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	u.RawQuery, u.Fragment = "", ""
+
+	var base string
+	versionRe := regexp.MustCompile("v[0-9.]+/?")
+	if version := versionRe.FindString(u.Path); version != "" {
+		base = strings.Replace(u.String(), version, "", -1)
+	} else {
+		base = u.String()
+	}
+
+	endpoint = gophercloud.NormalizeURL(endpoint)
+	base = gophercloud.NormalizeURL(base)
+
+	return &gophercloud.ProviderClient{
+		IdentityBase:     base,
+		IdentityEndpoint: endpoint,
+	}, nil
+
+}
+
+/*
+AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint
+specified by the options, acquires a token, and returns a ProviderClient
+instance that's ready to operate.
+
+If the full path to a versioned identity endpoint was specified (example:
+http://example.com:5000/v3), that path will be used as the endpoint to query.
+
+If a versionless endpoint was specified (example: http://example.com:5000/),
+the endpoint will be queried to determine which versions of the identity service
+are available, and the most recent or most supported version will be chosen.
+
+Example:
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+*/
+func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
+	client, err := NewClient(options.IdentityEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	err = Authenticate(client, options)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// Authenticate or re-authenticate against the most recent identity service
+// supported at the provided endpoint.
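+//
+// A minimal sketch, reusing the NewClient example above:
+//
+//	ao, err := openstack.AuthOptionsFromEnv()
+//	provider, err := openstack.NewClient(ao.IdentityEndpoint)
+//	err = openstack.Authenticate(provider, ao)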
+func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + versions := []*utils.Version{ + {ID: v2, Priority: 20, Suffix: "/v2.0/"}, + {ID: v3, Priority: 30, Suffix: "/v3/"}, + } + + chosen, endpoint, err := utils.ChooseVersion(client, versions) + if err != nil { + return err + } + + switch chosen.ID { + case v2: + return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) + case v3: + return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) + default: + // The switch statement must be out of date from the versions list. + return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + } +} + +// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. +func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + return v2auth(client, "", options, eo) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + v2Client, err := NewIdentityV2(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + v2Opts := tokens2.AuthOptions{ + IdentityEndpoint: options.IdentityEndpoint, + Username: options.Username, + Password: options.Password, + TenantID: options.TenantID, + TenantName: options.TenantName, + AllowReauth: options.AllowReauth, + TokenID: options.TokenID, + } + + result := tokens2.Create(v2Client, v2Opts) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + client.ReauthFunc = func() error { + client.TokenID = "" + return v2auth(client, endpoint, options, eo) + } + } + client.TokenID = token.ID + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V2EndpointURL(catalog, opts) + } + + return nil +} + +// AuthenticateV3 explicitly authenticates against the identity v3 service. +func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + return v3auth(client, "", options, eo) +} + +func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + // Override the generated service endpoint with the one returned by the version endpoint. + v3Client, err := NewIdentityV3(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v3Client.Endpoint = endpoint + } + + result := tokens3.Create(v3Client, opts) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + client.TokenID = token.ID + + if opts.CanReauth() { + client.ReauthFunc = func() error { + client.TokenID = "" + return v3auth(client, endpoint, opts, eo) + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V3EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to interact with the +// v2 identity service. 
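+//
+// A minimal sketch; provider is an authenticated ProviderClient and the
+// region name is a hypothetical placeholder:
+//
+//	identityClient, err := openstack.NewIdentityV2(provider, gophercloud.EndpointOpts{
+//		Region: "RegionOne",
+//	})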
+func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v2.0/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 +// identity service. +func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v3/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint and requests will fail unless targeted at /v3. + if !strings.HasSuffix(endpoint, "v3/") { + endpoint = endpoint + "v3/" + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { + sc := new(gophercloud.ServiceClient) + eo.ApplyDefaults(clientType) + url, err := client.EndpointLocator(eo) + if err != nil { + return sc, err + } + sc.ProviderClient = client + sc.Endpoint = url + sc.Type = clientType + return sc, nil +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 +// object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "object-store") +} + +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute +// package. +func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "compute") +} + +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network +// package. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "network") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 +// block storage service. +func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volume") +} + +// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 +// block storage service. +func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev2") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the +// v2 shared file system service. 
+func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	return initClientOpts(client, eo, "sharev2")
+}
+
+// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1
+// CDN service.
+func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	return initClientOpts(client, eo, "cdn")
+}
+
+// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1
+// orchestration service.
+func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	return initClientOpts(client, eo, "orchestration")
+}
+
+// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service.
+func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	return initClientOpts(client, eo, "database")
+}
+
+// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS
+// service.
+func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	sc, err := initClientOpts(client, eo, "dns")
+	sc.ResourceBase = sc.Endpoint + "v2/"
+	return sc, err
+}
+
+// NewImageServiceV2 creates a ServiceClient that may be used to access the v2
+// image service.
+func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) {
+	sc, err := initClientOpts(client, eo, "image")
+	sc.ResourceBase = sc.Endpoint + "v2/"
+	return sc, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go
new file mode 100644
index 00000000000..80464ba3994
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go
@@ -0,0 +1,26 @@
+/*
+Package availabilityzones provides the ability to extend a server result with
+availability zone information. Example:
+
+	type ServerWithAZ struct {
+		servers.Server
+		availabilityzones.ServerAvailabilityZoneExt
+	}
+
+	var allServers []ServerWithAZ
+
+	allPages, err := servers.List(client, nil).AllPages()
+	if err != nil {
+		panic(fmt.Sprintf("Unable to retrieve servers: %s", err))
+	}
+
+	err = servers.ExtractServersInto(allPages, &allServers)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to extract servers: %s", err))
+	}
+
+	for _, server := range allServers {
+		fmt.Println(server.AvailabilityZone)
+	}
+*/
+package availabilityzones
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go
new file mode 100644
index 00000000000..ae874041371
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go
@@ -0,0 +1,8 @@
+package availabilityzones
+
+// ServerAvailabilityZoneExt is an extension to the base Server result which
+// includes the Availability Zone information.
+type ServerAvailabilityZoneExt struct {
+	// AvailabilityZone is the availability zone the server is in.
+ AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go new file mode 100644 index 00000000000..d291325e0a1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go @@ -0,0 +1,152 @@ +/* +Package bootfromvolume extends a server create request with the ability to +specify block device options. This can be used to boot a server from a block +storage volume as well as specify multiple ephemeral disks upon creation. + +It is recommended to refer to the Block Device Mapping documentation to see +all possible ways to configure a server's block devices at creation time: + +https://docs.openstack.org/nova/latest/user/block-device-mapping.html + +Note that this package implements `block_device_mapping_v2`. + +Example of Creating a Server From an Image + +This example will boot a server from an image and use a standard ephemeral +disk as the server's root disk. This is virtually no different than creating +a server without using block device mappings. + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + ImageRef: "image-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Creating a Server From a New Volume + +This example will create a block storage volume based on the given Image. The +server will use this volume as its root disk. + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + VolumeSize: 2, + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Creating a Server From an Existing Volume + +This example will create a server with an existing volume as its root disk. + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceVolume, + UUID: "volume-uuid", + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Creating a Server with Multiple Ephemeral Disks + +This example will create a server with multiple ephemeral disks. The first +block device will be based off of an existing Image. Each additional +ephemeral disks must have an index of -1. 
+ + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + VolumeSize: 5, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + ImageRef: "image-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package bootfromvolume diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go new file mode 100644 index 00000000000..9dae14c7a93 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go @@ -0,0 +1,120 @@ +package bootfromvolume + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +type ( + // DestinationType represents the type of medium being used as the + // destination of the bootable device. + DestinationType string + + // SourceType represents the type of medium being used as the source of the + // bootable device. + SourceType string +) + +const ( + // DestinationLocal DestinationType is for using an ephemeral disk as the + // destination. + DestinationLocal DestinationType = "local" + + // DestinationVolume DestinationType is for using a volume as the destination. + DestinationVolume DestinationType = "volume" + + // SourceBlank SourceType is for a "blank" or empty source. + SourceBlank SourceType = "blank" + + // SourceImage SourceType is for using images as the source of a block device. + SourceImage SourceType = "image" + + // SourceSnapshot SourceType is for using a volume snapshot as the source of + // a block device. + SourceSnapshot SourceType = "snapshot" + + // SourceVolume SourceType is for using a volume as the source of block + // device. + SourceVolume SourceType = "volume" +) + +// BlockDevice is a structure with options for creating block devices in a +// server. The block device may be created from an image, snapshot, new volume, +// or existing volume. The destination may be a new volume, existing volume +// which will be attached to the instance, ephemeral disk, or boot device. +type BlockDevice struct { + // SourceType must be one of: "volume", "snapshot", "image", or "blank". + SourceType SourceType `json:"source_type" required:"true"` + + // UUID is the unique identifier for the existing volume, snapshot, or + // image (see above). + UUID string `json:"uuid,omitempty"` + + // BootIndex is the boot index. It defaults to 0. + BootIndex int `json:"boot_index"` + + // DeleteOnTermination specifies whether or not to delete the attached volume + // when the server is deleted. Defaults to `false`. 
+	DeleteOnTermination bool `json:"delete_on_termination"`
+
+	// DestinationType is the type that gets created. Possible values are "volume"
+	// and "local".
+	DestinationType DestinationType `json:"destination_type,omitempty"`
+
+	// GuestFormat specifies the format of the block device.
+	GuestFormat string `json:"guest_format,omitempty"`
+
+	// VolumeSize is the size of the volume to create (in gigabytes). This can be
+	// omitted for existing volumes.
+	VolumeSize int `json:"volume_size,omitempty"`
+}
+
+// CreateOptsExt is a structure that extends the server `CreateOpts` structure
+// by allowing for a block device mapping.
+type CreateOptsExt struct {
+	servers.CreateOptsBuilder
+	BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"`
+}
+
+// ToServerCreateMap adds the block device mapping option to the base server
+// creation options.
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(opts.BlockDevice) == 0 {
+		err := gophercloud.ErrMissingInput{}
+		err.Argument = "bootfromvolume.CreateOptsExt.BlockDevice"
+		return nil, err
+	}
+
+	serverMap := base["server"].(map[string]interface{})
+
+	blockDevice := make([]map[string]interface{}, len(opts.BlockDevice))
+
+	for i, bd := range opts.BlockDevice {
+		b, err := gophercloud.BuildRequestBody(bd, "")
+		if err != nil {
+			return nil, err
+		}
+		blockDevice[i] = b
+	}
+	serverMap["block_device_mapping_v2"] = blockDevice
+
+	return base, nil
+}
+
+// Create requests the creation of a server from the given block device mapping.
+func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) (r servers.CreateResult) {
+	b, err := opts.ToServerCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
new file mode 100644
index 00000000000..ba1eafabcd0
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
@@ -0,0 +1,12 @@
+package bootfromvolume
+
+import (
+	os "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+)
+
+// CreateResult contains the response from a Create call. It embeds the
+// standard servers.CreateResult type and so can be used the same way as a
+// standard server request result.
+type CreateResult struct {
+	os.CreateResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
new file mode 100644
index 00000000000..dc007eadf86
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
@@ -0,0 +1,7 @@
+package bootfromvolume
+
+import "github.com/gophercloud/gophercloud"
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("os-volumes_boot")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
new file mode 100644
index 00000000000..f5dbdbf8b94
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
@@ -0,0 +1,68 @@
+/*
+Package floatingips provides the ability to manage floating IPs through the
+Nova API.
+
+This API has been deprecated and will be removed from a future release of the
+Nova API service.
+
+For environments that support this extension, this package can be used
+regardless of whether Neutron or nova-network is used as the cloud's network
+service.
+
+Example to List Floating IPs
+
+	allPages, err := floatingips.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, fip := range allFloatingIPs {
+		fmt.Printf("%+v\n", fip)
+	}
+
+Example to Create a Floating IP
+
+	createOpts := floatingips.CreateOpts{
+		Pool: "nova",
+	}
+
+	fip, err := floatingips.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Floating IP
+
+	err := floatingips.Delete(computeClient, "floatingip-id").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Associate a Floating IP With a Server
+
+	associateOpts := floatingips.AssociateOpts{
+		FloatingIP: "10.10.10.2",
+	}
+
+	err := floatingips.AssociateInstance(computeClient, "server-id", associateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Disassociate a Floating IP From a Server
+
+	disassociateOpts := floatingips.DisassociateOpts{
+		FloatingIP: "10.10.10.2",
+	}
+
+	err := floatingips.DisassociateInstance(computeClient, "server-id", disassociateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package floatingips
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
new file mode 100644
index 00000000000..a922639dec9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
@@ -0,0 +1,114 @@
+package floatingips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of FloatingIPs.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
+		return FloatingIPPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface { + ToFloatingIPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies a Floating IP allocation request. +type CreateOpts struct { + // Pool is the pool of Floating IPs to allocate one from. + Pool string `json:"pool" required:"true"` +} + +// ToFloatingIPCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create requests the creation of a new Floating IP. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFloatingIPCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns data about a previously created Floating IP. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete requests the deletion of a previous allocated Floating IP. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// AssociateOptsBuilder allows extensions to add additional parameters to the +// Associate request. +type AssociateOptsBuilder interface { + ToFloatingIPAssociateMap() (map[string]interface{}, error) +} + +// AssociateOpts specifies the required information to associate a Floating IP with an instance +type AssociateOpts struct { + // FloatingIP is the Floating IP to associate with an instance. + FloatingIP string `json:"address" required:"true"` + + // FixedIP is an optional fixed IP address of the server. + FixedIP string `json:"fixed_address,omitempty"` +} + +// ToFloatingIPAssociateMap constructs a request body from AssociateOpts. +func (opts AssociateOpts) ToFloatingIPAssociateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addFloatingIp") +} + +// AssociateInstance pairs an allocated Floating IP with a server. +func AssociateInstance(client *gophercloud.ServiceClient, serverID string, opts AssociateOptsBuilder) (r AssociateResult) { + b, err := opts.ToFloatingIPAssociateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(associateURL(client, serverID), b, nil, nil) + return +} + +// DisassociateOptsBuilder allows extensions to add additional parameters to +// the Disassociate request. +type DisassociateOptsBuilder interface { + ToFloatingIPDisassociateMap() (map[string]interface{}, error) +} + +// DisassociateOpts specifies the required information to disassociate a +// Floating IP with a server. +type DisassociateOpts struct { + FloatingIP string `json:"address" required:"true"` +} + +// ToFloatingIPDisassociateMap constructs a request body from DisassociateOpts. 
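+//
+// A sketch of the map this produces for FloatingIP "10.10.10.2", assuming
+// BuildRequestBody nests the fields under the given parent key:
+//
+//	map[string]interface{}{
+//		"removeFloatingIp": map[string]interface{}{
+//			"address": "10.10.10.2",
+//		},
+//	}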
+func (opts DisassociateOpts) ToFloatingIPDisassociateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "removeFloatingIp") +} + +// DisassociateInstance decouples an allocated Floating IP from an instance +func DisassociateInstance(client *gophercloud.ServiceClient, serverID string, opts DisassociateOptsBuilder) (r DisassociateResult) { + b, err := opts.ToFloatingIPDisassociateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(disassociateURL(client, serverID), b, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go new file mode 100644 index 00000000000..da4e9da0e62 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go @@ -0,0 +1,115 @@ +package floatingips + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A FloatingIP is an IP that can be associated with a server. +type FloatingIP struct { + // ID is a unique ID of the Floating IP + ID string `json:"-"` + + // FixedIP is a specific IP on the server to pair the Floating IP with. + FixedIP string `json:"fixed_ip,omitempty"` + + // InstanceID is the ID of the server that is using the Floating IP. + InstanceID string `json:"instance_id"` + + // IP is the actual Floating IP. + IP string `json:"ip"` + + // Pool is the pool of Floating IPs that this Floating IP belongs to. + Pool string `json:"pool"` +} + +func (r *FloatingIP) UnmarshalJSON(b []byte) error { + type tmp FloatingIP + var s struct { + tmp + ID interface{} `json:"id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = FloatingIP(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + return err +} + +// FloatingIPPage stores a single page of FloatingIPs from a List call. +type FloatingIPPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a FloatingIPsPage is empty. +func (page FloatingIPPage) IsEmpty() (bool, error) { + va, err := ExtractFloatingIPs(page) + return len(va) == 0, err +} + +// ExtractFloatingIPs interprets a page of results as a slice of FloatingIPs. +func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { + var s struct { + FloatingIPs []FloatingIP `json:"floating_ips"` + } + err := (r.(FloatingIPPage)).ExtractInto(&s) + return s.FloatingIPs, err +} + +// FloatingIPResult is the raw result from a FloatingIP request. +type FloatingIPResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any FloatingIP resource +// response as a FloatingIP struct. +func (r FloatingIPResult) Extract() (*FloatingIP, error) { + var s struct { + FloatingIP *FloatingIP `json:"floating_ip"` + } + err := r.ExtractInto(&s) + return s.FloatingIP, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a FloatingIP. +type CreateResult struct { + FloatingIPResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a FloatingIP. +type GetResult struct { + FloatingIPResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. 
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// AssociateResult is the response from an Associate operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
+type AssociateResult struct {
+	gophercloud.ErrResult
+}
+
+// DisassociateResult is the response from a Disassociate operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
+type DisassociateResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go
new file mode 100644
index 00000000000..4768e5a8976
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go
@@ -0,0 +1,37 @@
+package floatingips
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "os-floating-ips"
+
+func resourceURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return getURL(c, id)
+}
+
+func serverURL(c *gophercloud.ServiceClient, serverID string) string {
+	return c.ServiceURL("servers/" + serverID + "/action")
+}
+
+func associateURL(c *gophercloud.ServiceClient, serverID string) string {
+	return serverURL(c, serverID)
+}
+
+func disassociateURL(c *gophercloud.ServiceClient, serverID string) string {
+	return serverURL(c, serverID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
new file mode 100644
index 00000000000..dc7b65fda18
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
@@ -0,0 +1,71 @@
+/*
+Package keypairs provides the ability to manage key pairs as well as create
+servers with a specified key pair.
+ +Example to List Key Pairs + + allPages, err := keypairs.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allKeyPairs, err := keypairs.ExtractKeyPairs(allPages) + if err != nil { + panic(err) + } + + for _, kp := range allKeyPairs { + fmt.Printf("%+v\n", kp) + } + +Example to Create a Key Pair + + createOpts := keypairs.CreateOpts{ + Name: "keypair-name", + } + + keypair, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", keypair) + +Example to Import a Key Pair + + createOpts := keypairs.CreateOpts{ + Name: "keypair-name", + PublicKey: "public-key", + } + + keypair, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Key Pair + + err := keypairs.Delete(computeClient, "keypair-name").ExtractErr() + if err != nil { + panic(err) + } + +Example to Create a Server With a Key Pair + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := keypairs.CreateOpts{ + CreateOptsBuilder: serverCreateOpts, + KeyName: "keypair-name", + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package keypairs diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go new file mode 100644 index 00000000000..4e5e499e3aa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go @@ -0,0 +1,86 @@ +package keypairs + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsExt adds a KeyPair option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // KeyName is the name of the key pair. + KeyName string `json:"key_name,omitempty"` +} + +// ToServerCreateMap adds the key_name to the base server creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if opts.KeyName == "" { + return base, nil + } + + serverMap := base["server"].(map[string]interface{}) + serverMap["key_name"] = opts.KeyName + + return base, nil +} + +// List returns a Pager that allows you to iterate over a collection of KeyPairs. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return KeyPairPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToKeyPairCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies KeyPair creation or import parameters. +type CreateOpts struct { + // Name is a friendly name to refer to this KeyPair in other services. + Name string `json:"name" required:"true"` + + // PublicKey [optional] is a pregenerated OpenSSH-formatted public key. + // If provided, this key will be imported and no new key will be created. + PublicKey string `json:"public_key,omitempty"` +} + +// ToKeyPairCreateMap constructs a request body from CreateOpts. 
+func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "keypair") +} + +// Create requests the creation of a new KeyPair on the server, or to import a +// pre-existing keypair. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToKeyPairCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns public data about a previously uploaded KeyPair. +func Get(client *gophercloud.ServiceClient, name string) (r GetResult) { + _, r.Err = client.Get(getURL(client, name), &r.Body, nil) + return +} + +// Delete requests the deletion of a previous stored KeyPair from the server. +func Delete(client *gophercloud.ServiceClient, name string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, name), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go new file mode 100644 index 00000000000..2d71034b101 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go @@ -0,0 +1,91 @@ +package keypairs + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// KeyPair is an SSH key known to the OpenStack Cloud that is available to be +// injected into servers. +type KeyPair struct { + // Name is used to refer to this keypair from other services within this + // region. + Name string `json:"name"` + + // Fingerprint is a short sequence of bytes that can be used to authenticate + // or validate a longer public key. + Fingerprint string `json:"fingerprint"` + + // PublicKey is the public key from this pair, in OpenSSH format. + // "ssh-rsa AAAAB3Nz..." + PublicKey string `json:"public_key"` + + // PrivateKey is the private key from this pair, in PEM format. + // "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." + // It is only present if this KeyPair was just returned from a Create call. + PrivateKey string `json:"private_key"` + + // UserID is the user who owns this KeyPair. + UserID string `json:"user_id"` +} + +// KeyPairPage stores a single page of all KeyPair results from a List call. +// Use the ExtractKeyPairs function to convert the results to a slice of +// KeyPairs. +type KeyPairPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a KeyPairPage is empty. +func (page KeyPairPage) IsEmpty() (bool, error) { + ks, err := ExtractKeyPairs(page) + return len(ks) == 0, err +} + +// ExtractKeyPairs interprets a page of results as a slice of KeyPairs. +func ExtractKeyPairs(r pagination.Page) ([]KeyPair, error) { + type pair struct { + KeyPair KeyPair `json:"keypair"` + } + var s struct { + KeyPairs []pair `json:"keypairs"` + } + err := (r.(KeyPairPage)).ExtractInto(&s) + results := make([]KeyPair, len(s.KeyPairs)) + for i, pair := range s.KeyPairs { + results[i] = pair.KeyPair + } + return results, err +} + +type keyPairResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any KeyPair resource response +// as a KeyPair struct. 
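+//
+// A minimal sketch; computeClient is a hypothetical placeholder:
+//
+//	kp, err := keypairs.Get(computeClient, "keypair-name").Extract()
+//	if err == nil {
+//		fmt.Println(kp.Fingerprint)
+//	}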
+func (r keyPairResult) Extract() (*KeyPair, error) { + var s struct { + KeyPair *KeyPair `json:"keypair"` + } + err := r.ExtractInto(&s) + return s.KeyPair, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a KeyPair. +type CreateResult struct { + keyPairResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a KeyPair. +type GetResult struct { + keyPairResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go new file mode 100644 index 00000000000..fec38f36793 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go @@ -0,0 +1,25 @@ +package keypairs + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-keypairs" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, name string) string { + return c.ServiceURL(resourcePath, name) +} + +func deleteURL(c *gophercloud.ServiceClient, name string) string { + return getURL(c, name) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go new file mode 100644 index 00000000000..2d9d3acdeca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go @@ -0,0 +1,76 @@ +/* +Package schedulerhints extends the server create request with the ability to +specify additional parameters which determine where the server will be +created in the OpenStack cloud. 
+
+Example to Add a Server to a Server Group
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		Group: "servergroup-uuid",
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_name",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := schedulerhints.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		SchedulerHints:    schedulerHints,
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Place Server B on a Different Host than Server A
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		DifferentHost: []string{
+			"server-a-uuid",
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_b",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := schedulerhints.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		SchedulerHints:    schedulerHints,
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Place Server B on the Same Host as Server A
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		SameHost: []string{
+			"server-a-uuid",
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_b",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := schedulerhints.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		SchedulerHints:    schedulerHints,
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package schedulerhints
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go
new file mode 100644
index 00000000000..3fabeddef3b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go
@@ -0,0 +1,164 @@
+package schedulerhints
+
+import (
+	"net"
+	"regexp"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+)
+
+// SchedulerHints represents a set of scheduling hints that are passed to the
+// OpenStack scheduler.
+type SchedulerHints struct {
+	// Group specifies a Server Group to place the instance in.
+	Group string
+
+	// DifferentHost will place the instance on a compute node that does not
+	// host the given instances.
+	DifferentHost []string
+
+	// SameHost will place the instance on a compute node that hosts the given
+	// instances.
+	SameHost []string
+
+	// Query is a conditional statement that results in compute nodes able to
+	// host the instance.
+	Query []interface{}
+
+	// TargetCell specifies a cell name where the instance will be placed.
+	TargetCell string `json:"target_cell,omitempty"`
+
+	// BuildNearHostIP specifies a subnet of compute nodes to host the instance.
+	BuildNearHostIP string
+
+	// AdditionalProperties are arbitrary key/values that are not validated by nova.
+	AdditionalProperties map[string]interface{}
+}
+
+// CreateOptsBuilder builds the scheduler hints into a serializable format.
+type CreateOptsBuilder interface {
+	ToServerSchedulerHintsCreateMap() (map[string]interface{}, error)
+}
+
+// ToServerSchedulerHintsCreateMap builds the scheduler hints into a serializable format.
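+//
+// A sketch of the map this produces for a Query hint (the values are
+// illustrative, taken from the condition format documented in the method
+// body below):
+//
+//	hints := schedulerhints.SchedulerHints{
+//		Query: []interface{}{">=", "$free_ram_mb", 1024},
+//	}
+//	m, _ := hints.ToServerSchedulerHintsCreateMap()
+//	// m == map[string]interface{}{"query": []interface{}{">=", "$free_ram_mb", 1024}}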
+func (opts SchedulerHints) ToServerSchedulerHintsCreateMap() (map[string]interface{}, error) { + sh := make(map[string]interface{}) + + uuidRegex, _ := regexp.Compile("^[a-z0-9]{8}-[a-z0-9]{4}-[1-5][a-z0-9]{3}-[a-z0-9]{4}-[a-z0-9]{12}$") + + if opts.Group != "" { + if !uuidRegex.MatchString(opts.Group) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Group" + err.Value = opts.Group + err.Info = "Group must be a UUID" + return nil, err + } + sh["group"] = opts.Group + } + + if len(opts.DifferentHost) > 0 { + for _, diffHost := range opts.DifferentHost { + if !uuidRegex.MatchString(diffHost) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.DifferentHost" + err.Value = opts.DifferentHost + err.Info = "The hosts must be in UUID format." + return nil, err + } + } + sh["different_host"] = opts.DifferentHost + } + + if len(opts.SameHost) > 0 { + for _, sameHost := range opts.SameHost { + if !uuidRegex.MatchString(sameHost) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.SameHost" + err.Value = opts.SameHost + err.Info = "The hosts must be in UUID format." + return nil, err + } + } + sh["same_host"] = opts.SameHost + } + + /* + Query can be something simple like: + [">=", "$free_ram_mb", 1024] + + Or more complex like: + ['and', + ['>=', '$free_ram_mb', 1024], + ['>=', '$free_disk_mb', 200 * 1024] + ] + + Because of the possible complexity, just make sure the length is a minimum of 3. + */ + if len(opts.Query) > 0 { + if len(opts.Query) < 3 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Query" + err.Value = opts.Query + err.Info = "Must be a conditional statement in the format of [op,variable,value]" + return nil, err + } + sh["query"] = opts.Query + } + + if opts.TargetCell != "" { + sh["target_cell"] = opts.TargetCell + } + + if opts.BuildNearHostIP != "" { + if _, _, err := net.ParseCIDR(opts.BuildNearHostIP); err != nil { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.BuildNearHostIP" + err.Value = opts.BuildNearHostIP + err.Info = "Must be a valid subnet in the form 192.168.1.1/24" + return nil, err + } + ipParts := strings.Split(opts.BuildNearHostIP, "/") + sh["build_near_host_ip"] = ipParts[0] + sh["cidr"] = "/" + ipParts[1] + } + + if opts.AdditionalProperties != nil { + for k, v := range opts.AdditionalProperties { + sh[k] = v + } + } + + return sh, nil +} + +// CreateOptsExt adds a SchedulerHints option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // SchedulerHints provides a set of hints to the scheduler. + SchedulerHints CreateOptsBuilder +} + +// ToServerCreateMap adds the SchedulerHints option to the base server creation options. 
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + schedulerHints, err := opts.SchedulerHints.ToServerSchedulerHintsCreateMap() + if err != nil { + return nil, err + } + + if len(schedulerHints) == 0 { + return base, nil + } + + base["os:scheduler_hints"] = schedulerHints + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go new file mode 100644 index 00000000000..8d3ebf2e5d5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go @@ -0,0 +1,112 @@ +/* +Package secgroups provides the ability to manage security groups through the +Nova API. + +This API has been deprecated and will be removed from a future release of the +Nova API service. + +For environments that support this extension, this package can be used +regardless of whether Neutron or nova-network is used as the cloud's network +service. + +Example to List Security Groups + + allPages, err := secgroups.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages) + if err != nil { + panic(err) + } + + for _, sg := range allSecurityGroups { + fmt.Printf("%+v\n", sg) + } + +Example to List Security Groups by Server + + serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36" + + allPages, err := secgroups.ListByServer(computeClient, serverID).AllPages() + if err != nil { + panic(err) + } + + allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages) + if err != nil { + panic(err) + } + + for _, sg := range allSecurityGroups { + fmt.Printf("%+v\n", sg) + } + +Example to Create a Security Group + + createOpts := secgroups.CreateOpts{ + Name: "group_name", + Description: "A Security Group", + } + + sg, err := secgroups.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Create a Security Group Rule + + sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + + createOpts := secgroups.CreateRuleOpts{ + ParentGroupID: sgID, + FromPort: 22, + ToPort: 22, + IPProtocol: "tcp", + CIDR: "0.0.0.0/0", + } + + rule, err := secgroups.CreateRule(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Add a Security Group to a Server + + serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36" + sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + + err := secgroups.AddServer(computeClient, serverID, sgID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Remove a Security Group from a Server + + serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36" + sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + + err := secgroups.RemoveServer(computeClient, serverID, sgID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Delete a Security Group + + sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := secgroups.Delete(computeClient, sgID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Delete a Security Group Rule + + ruleID := "6221fe3e-383d-46c9-a3a6-845e66c1e8b4" + err := secgroups.DeleteRule(computeClient, ruleID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package secgroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go new file mode 100644 index 00000000000..bcceaeacdd1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go @@ -0,0 +1,183 @@ +package secgroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +func commonList(client *gophercloud.ServiceClient, url string) pagination.Pager { + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SecurityGroupPage{pagination.SinglePageBase(r)} + }) +} + +// List will return a collection of all the security groups for a particular +// tenant. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return commonList(client, rootURL(client)) +} + +// ListByServer will return a collection of all the security groups which are +// associated with a particular server. +func ListByServer(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return commonList(client, listByServerURL(client, serverID)) +} + +// GroupOpts is the underlying struct responsible for creating or updating +// security groups. It therefore represents the mutable attributes of a +// security group. +type GroupOpts struct { + // the name of your security group. + Name string `json:"name" required:"true"` + // the description of your security group. + Description string `json:"description" required:"true"` +} + +// CreateOpts is the struct responsible for creating a security group. +type CreateOpts GroupOpts + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +// ToSecGroupCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Create will create a new security group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// UpdateOpts is the struct responsible for updating an existing security group. +type UpdateOpts GroupOpts + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// ToSecGroupUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Update will modify the mutable properties of a security group, notably its +// name and description. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecGroupUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(resourceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get will return details for a particular security group. 
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, id), &r.Body, nil) + return +} + +// Delete will permanently delete a security group from the project. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, id), nil) + return +} + +// CreateRuleOpts represents the configuration for adding a new rule to an +// existing security group. +type CreateRuleOpts struct { + // ParentGroupID is the ID of the group that this rule will be added to. + ParentGroupID string `json:"parent_group_id" required:"true"` + + // FromPort is the lower bound of the port range that will be opened. + // Use -1 to allow all ICMP traffic. + FromPort int `json:"from_port"` + + // ToPort is the upper bound of the port range that will be opened. + // Use -1 to allow all ICMP traffic. + ToPort int `json:"to_port"` + + // IPProtocol is the protocol type that will be allowed, e.g. TCP. + IPProtocol string `json:"ip_protocol" required:"true"` + + // CIDR is the network CIDR to allow traffic from. + // This is ONLY required if FromGroupID is blank. This represents the IP + // range that will be the source of network traffic to your security group. + // Use 0.0.0.0/0 to allow all IP addresses. + CIDR string `json:"cidr,omitempty" or:"FromGroupID"` + + // FromGroupID represents another security group to allow access. + // This is ONLY required if CIDR is blank. This value represents the ID of a + // group that forwards traffic to the parent group. So, instead of accepting + // network traffic from an entire IP range, you can instead refine the + // inbound source by an existing security group. + FromGroupID string `json:"group_id,omitempty" or:"CIDR"` +} + +// CreateRuleOptsBuilder allows extensions to add additional parameters to the +// CreateRule request. +type CreateRuleOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// ToRuleCreateMap builds a request body from CreateRuleOpts. +func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group_rule") +} + +// CreateRule will add a new rule to an existing security group (whose ID is +// specified in CreateRuleOpts). You have the option of controlling inbound +// traffic from either an IP range (CIDR) or from another security group. +func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) (r CreateRuleResult) { + b, err := opts.ToRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootRuleURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// DeleteRule will permanently delete a rule from a security group. +func DeleteRule(client *gophercloud.ServiceClient, id string) (r DeleteRuleResult) { + _, r.Err = client.Delete(resourceRuleURL(client, id), nil) + return +} + +func actionMap(prefix, groupName string) map[string]map[string]string { + return map[string]map[string]string{ + prefix + "SecurityGroup": map[string]string{"name": groupName}, + } +} + +// AddServer will associate a server and a security group, enforcing the +// rules of the group on the server. +func AddServer(client *gophercloud.ServiceClient, serverID, groupName string) (r AddServerResult) { + _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("add", groupName), &r.Body, nil) + return +} + +// RemoveServer will disassociate a server from a security group. 
+func RemoveServer(client *gophercloud.ServiceClient, serverID, groupName string) (r RemoveServerResult) { + _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("remove", groupName), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go new file mode 100644 index 00000000000..cf08547e901 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go @@ -0,0 +1,214 @@ +package secgroups + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecurityGroup represents a security group. +type SecurityGroup struct { + // The unique ID of the group. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string `json:"-"` + + // The human-readable name of the group, which needs to be unique. + Name string `json:"name"` + + // The human-readable description of the group. + Description string `json:"description"` + + // The rules which determine how this security group operates. + Rules []Rule `json:"rules"` + + // The ID of the tenant to which this security group belongs. + TenantID string `json:"tenant_id"` +} + +func (r *SecurityGroup) UnmarshalJSON(b []byte) error { + type tmp SecurityGroup + var s struct { + tmp + ID interface{} `json:"id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = SecurityGroup(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + return err +} + +// Rule represents a security group rule, a policy which determines how a +// security group operates and what inbound traffic it allows in. +type Rule struct { + // The unique ID. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string `json:"-"` + + // The lower bound of the port range which this security group should open up. + FromPort int `json:"from_port"` + + // The upper bound of the port range which this security group should open up. + ToPort int `json:"to_port"` + + // The IP protocol (e.g. TCP) which the security group accepts. + IPProtocol string `json:"ip_protocol"` + + // The CIDR IP range whose traffic can be received. + IPRange IPRange `json:"ip_range"` + + // The security group ID to which this rule belongs. + ParentGroupID string `json:"parent_group_id"` + + // Not documented. + Group Group +} + +func (r *Rule) UnmarshalJSON(b []byte) error { + type tmp Rule + var s struct { + tmp + ID interface{} `json:"id"` + ParentGroupID interface{} `json:"parent_group_id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Rule(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + switch t := s.ParentGroupID.(type) { + case float64: + r.ParentGroupID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ParentGroupID = t + } + + return err +} + +// IPRange represents the IP range whose traffic will be accepted by the +// security group. 
+type IPRange struct { + CIDR string +} + +// Group represents a group. +type Group struct { + TenantID string `json:"tenant_id"` + Name string +} + +// SecurityGroupPage is a single page of a SecurityGroup collection. +type SecurityGroupPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Security Groups contains any +// results. +func (page SecurityGroupPage) IsEmpty() (bool, error) { + users, err := ExtractSecurityGroups(page) + return len(users) == 0, err +} + +// ExtractSecurityGroups returns a slice of SecurityGroups contained in a +// single page of results. +func ExtractSecurityGroups(r pagination.Page) ([]SecurityGroup, error) { + var s struct { + SecurityGroups []SecurityGroup `json:"security_groups"` + } + err := (r.(SecurityGroupPage)).ExtractInto(&s) + return s.SecurityGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type UpdateResult struct { + commonResult +} + +// Extract will extract a SecurityGroup struct from most responses. +func (r commonResult) Extract() (*SecurityGroup, error) { + var s struct { + SecurityGroup *SecurityGroup `json:"security_group"` + } + err := r.ExtractInto(&s) + return s.SecurityGroup, err +} + +// CreateRuleResult represents the result when adding rules to a security group. +// Call its Extract method to interpret the result as a Rule. +type CreateRuleResult struct { + gophercloud.Result +} + +// Extract will extract a Rule struct from a CreateRuleResult. +func (r CreateRuleResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"security_group_rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// DeleteRuleResult is the response from a DeleteRule operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteRuleResult struct { + gophercloud.ErrResult +} + +// AddServerResult is the response from an AddServer operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type AddServerResult struct { + gophercloud.ErrResult +} + +// RemoveServerResult is the response from a RemoveServer operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type RemoveServerResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go new file mode 100644 index 00000000000..d99746cae92 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go @@ -0,0 +1,32 @@ +package secgroups + +import "github.com/gophercloud/gophercloud" + +const ( + secgrouppath = "os-security-groups" + rulepath = "os-security-group-rules" +) + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(secgrouppath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(secgrouppath) +} + +func listByServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers", serverID, secgrouppath) +} + +func rootRuleURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rulepath) +} + +func resourceRuleURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rulepath, id) +} + +func serverActionURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go new file mode 100644 index 00000000000..814bde37f3c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go @@ -0,0 +1,40 @@ +/* +Package servergroups provides the ability to manage server groups. + +Example to List Server Groups + + allPages, err := servergroups.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allServerGroups, err := servergroups.ExtractServerGroups(allPages) + if err != nil { + panic(err) + } + + for _, sg := range allServerGroups { + fmt.Printf("%#v\n", sg) + } + +Example to Create a Server Group + + createOpts := servergroups.CreateOpts{ + Name: "my_sg", + Policies: []string{"anti-affinity"}, + } + + sg, err := servergroups.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Server Group + + sgID := "7a6f29ad-e34d-4368-951a-58a08f11cfb7" + err := servergroups.Delete(computeClient, sgID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package servergroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go new file mode 100644 index 00000000000..1439a5a34ce --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go @@ -0,0 +1,59 @@ +package servergroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of +// ServerGroups. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return ServerGroupPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. 
+type CreateOptsBuilder interface { + ToServerGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies Server Group creation parameters. +type CreateOpts struct { + // Name is the name of the server group. + Name string `json:"name" required:"true"` + + // Policies are the server group policies. + Policies []string `json:"policies" required:"true"` +} + +// ToServerGroupCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "server_group") +} + +// Create requests the creation of a new Server Group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToServerGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns data about a previously created ServerGroup. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete requests the deletion of a previously allocated ServerGroup. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go new file mode 100644 index 00000000000..b9aeef98154 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go @@ -0,0 +1,87 @@ +package servergroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A ServerGroup creates a policy for instance placement in the cloud. +type ServerGroup struct { + // ID is the unique ID of the Server Group. + ID string `json:"id"` + + // Name is the common name of the server group. + Name string `json:"name"` + + // Policies are the group policies. + // + // Normally a single policy is applied: + // + // "affinity" will place all servers within the server group on the + // same compute node. + // + // "anti-affinity" will place servers within the server group on different + // compute nodes. + Policies []string `json:"policies"` + + // Members are the members of the server group. + Members []string `json:"members"` + + // Metadata includes a list of all user-specified key-value pairs attached + // to the Server Group. + Metadata map[string]interface{} +} + +// ServerGroupPage stores a single page of all ServerGroups results from a +// List call. +type ServerGroupPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a ServerGroupPage is empty. +func (page ServerGroupPage) IsEmpty() (bool, error) { + va, err := ExtractServerGroups(page) + return len(va) == 0, err +} + +// ExtractServerGroups interprets a page of results as a slice of +// ServerGroups. +func ExtractServerGroups(r pagination.Page) ([]ServerGroup, error) { + var s struct { + ServerGroups []ServerGroup `json:"server_groups"` + } + err := (r.(ServerGroupPage)).ExtractInto(&s) + return s.ServerGroups, err +} + +type ServerGroupResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Server Group resource +// response as a ServerGroup struct. 
+func (r ServerGroupResult) Extract() (*ServerGroup, error) { + var s struct { + ServerGroup *ServerGroup `json:"server_group"` + } + err := r.ExtractInto(&s) + return s.ServerGroup, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a ServerGroup. +type CreateResult struct { + ServerGroupResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a ServerGroup. +type GetResult struct { + ServerGroupResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go new file mode 100644 index 00000000000..9a1f99b1997 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go @@ -0,0 +1,25 @@ +package servergroups + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-server-groups" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go new file mode 100644 index 00000000000..ab97edb7766 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go @@ -0,0 +1,19 @@ +/* +Package startstop provides functionality to start and stop servers that have +been provisioned by the OpenStack Compute service. + +Example to Stop and Start a Server + + serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e" + + err := startstop.Stop(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + + err = startstop.Start(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package startstop diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go new file mode 100644 index 00000000000..5b4f3f39dd4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go @@ -0,0 +1,19 @@ +package startstop + +import "github.com/gophercloud/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +// Start is the operation responsible for starting a Compute server. +func Start(client *gophercloud.ServiceClient, id string) (r StartResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-start": nil}, nil, nil) + return +} + +// Stop is the operation responsible for stopping a Compute server. 
+func Stop(client *gophercloud.ServiceClient, id string) (r StopResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-stop": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go new file mode 100644 index 00000000000..83496893328 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go @@ -0,0 +1,15 @@ +package startstop + +import "github.com/gophercloud/gophercloud" + +// StartResult is the response from a Start operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StartResult struct { + gophercloud.ErrResult +} + +// StopResult is the response from a Stop operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StopResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go new file mode 100644 index 00000000000..a32e8ffd587 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go @@ -0,0 +1,26 @@ +/* +Package tenantnetworks provides the ability for tenants to see information +about the networks they have access to. + +This is a deprecated API and will be removed from the Nova API service in a +future version. + +This API works in both Neutron and nova-network based OpenStack clouds. + +Example to List Networks Available to a Tenant + + allPages, err := tenantnetworks.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allNetworks, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v\n", network) + } +*/ +package tenantnetworks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go new file mode 100644 index 00000000000..00899056fdd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go @@ -0,0 +1,19 @@ +package tenantnetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of Networks. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.SinglePageBase(r)} + }) +} + +// Get returns data about a previously created Network. 
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go new file mode 100644 index 00000000000..bda77d5f502 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go @@ -0,0 +1,58 @@ +package tenantnetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A Network represents a network that a server communicates on. +type Network struct { + // CIDR is the IPv4 subnet. + CIDR string `json:"cidr"` + + // ID is the UUID of the network. + ID string `json:"id"` + + // Name is the common name that the network has. + Name string `json:"label"` +} + +// NetworkPage stores a single page of all Networks results from a List call. +type NetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a NetworkPage is empty. +func (page NetworkPage) IsEmpty() (bool, error) { + va, err := ExtractNetworks(page) + return len(va) == 0, err +} + +// ExtractNetworks interprets a page of results as a slice of Network. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s struct { + Networks []Network `json:"networks"` + } + err := (r.(NetworkPage)).ExtractInto(&s) + return s.Networks, err +} + +type NetworkResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Network resource response +// as a Network struct. +func (r NetworkResult) Extract() (*Network, error) { + var s struct { + Network *Network `json:"network"` + } + err := r.ExtractInto(&s) + return s.Network, err +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a Network. +type GetResult struct { + NetworkResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go new file mode 100644 index 00000000000..683041ded37 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go @@ -0,0 +1,17 @@ +package tenantnetworks + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-tenant-networks" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go new file mode 100644 index 00000000000..484eb20000c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go @@ -0,0 +1,30 @@ +/* +Package volumeattach provides the ability to attach and detach volumes +from servers. 
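+
+Example to List Volume Attachments (a sketch, not from the upstream docs; it
+assumes the same computeClient and serverID as the examples below)
+
+	serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d"
+
+	allPages, err := volumeattach.List(computeClient, serverID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allAttachments, err := volumeattach.ExtractVolumeAttachments(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, attachment := range allAttachments {
+		fmt.Printf("%+v\n", attachment)
+	}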
+ +Example to Attach a Volume + + serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" + volumeID := "87463836-f0e2-4029-abf6-20c8892a3103" + + createOpts := volumeattach.CreateOpts{ + Device: "/dev/vdc", + VolumeID: volumeID, + } + + result, err := volumeattach.Create(computeClient, serverID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Detach a Volume + + serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" + attachmentID := "ed081613-1c9b-4231-aa5e-ebfd4d87f983" + + err := volumeattach.Delete(computeClient, serverID, attachmentID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package volumeattach diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go new file mode 100644 index 00000000000..6a262c212e1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go @@ -0,0 +1,60 @@ +package volumeattach + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of +// VolumeAttachments. +func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return pagination.NewPager(client, listURL(client, serverID), func(r pagination.PageResult) pagination.Page { + return VolumeAttachmentPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add parameters to the Create request. +type CreateOptsBuilder interface { + ToVolumeAttachmentCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies volume attachment creation or import parameters. +type CreateOpts struct { + // Device is the device that the volume will attach to the instance as. + // Omit for "auto". + Device string `json:"device,omitempty"` + + // VolumeID is the ID of the volume to attach to the instance. + VolumeID string `json:"volumeId" required:"true"` +} + +// ToVolumeAttachmentCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToVolumeAttachmentCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volumeAttachment") +} + +// Create requests the creation of a new volume attachment on the server. +func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeAttachmentCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns public data about a previously created VolumeAttachment. +func Get(client *gophercloud.ServiceClient, serverID, attachmentID string) (r GetResult) { + _, r.Err = client.Get(getURL(client, serverID, attachmentID), &r.Body, nil) + return +} + +// Delete requests the deletion of a previously stored VolumeAttachment from +// the server. 
+func Delete(client *gophercloud.ServiceClient, serverID, attachmentID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, serverID, attachmentID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go new file mode 100644 index 00000000000..56d50347291 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go @@ -0,0 +1,77 @@ +package volumeattach + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// VolumeAttachment contains attachment information between a volume +// and server. +type VolumeAttachment struct { + // ID is the unique ID of the attachment. + ID string `json:"id"` + + // Device is what device the volume is attached as. + Device string `json:"device"` + + // VolumeID is the ID of the attached volume. + VolumeID string `json:"volumeId"` + + // ServerID is the ID of the instance that has the volume attached. + ServerID string `json:"serverId"` +} + +// VolumeAttachmentPage stores a single page of all VolumeAttachment +// results from a List call. +type VolumeAttachmentPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a VolumeAttachmentPage is empty. +func (page VolumeAttachmentPage) IsEmpty() (bool, error) { + va, err := ExtractVolumeAttachments(page) + return len(va) == 0, err +} + +// ExtractVolumeAttachments interprets a page of results as a slice of +// VolumeAttachment. +func ExtractVolumeAttachments(r pagination.Page) ([]VolumeAttachment, error) { + var s struct { + VolumeAttachments []VolumeAttachment `json:"volumeAttachments"` + } + err := (r.(VolumeAttachmentPage)).ExtractInto(&s) + return s.VolumeAttachments, err +} + +// VolumeAttachmentResult is the result from a volume attachment operation. +type VolumeAttachmentResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any VolumeAttachment resource +// response as a VolumeAttachment struct. +func (r VolumeAttachmentResult) Extract() (*VolumeAttachment, error) { + var s struct { + VolumeAttachment *VolumeAttachment `json:"volumeAttachment"` + } + err := r.ExtractInto(&s) + return s.VolumeAttachment, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a VolumeAttachment. +type CreateResult struct { + VolumeAttachmentResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a VolumeAttachment. +type GetResult struct { + VolumeAttachmentResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go new file mode 100644 index 00000000000..083f8dc4554 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go @@ -0,0 +1,25 @@ +package volumeattach + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-volume_attachments" + +func resourceURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers", serverID, resourcePath) +} + +func listURL(c *gophercloud.ServiceClient, serverID string) string { + return resourceURL(c, serverID) +} + +func createURL(c *gophercloud.ServiceClient, serverID string) string { + return resourceURL(c, serverID) +} + +func getURL(c *gophercloud.ServiceClient, serverID, aID string) string { + return c.ServiceURL("servers", serverID, resourcePath, aID) +} + +func deleteURL(c *gophercloud.ServiceClient, serverID, aID string) string { + return getURL(c, serverID, aID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go new file mode 100644 index 00000000000..2661cfac209 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -0,0 +1,45 @@ +/* +Package flavors provides information and interaction with the flavor API +in the OpenStack Compute service. + +A flavor is an available hardware configuration for a server. Each flavor +has a unique combination of disk space, memory capacity and priority for CPU +time. + +Example to List Flavors + + listOpts := flavors.ListOpts{ + AccessType: flavors.PublicAccess, + } + + allPages, err := flavors.ListDetail(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFlavors, err := flavors.ExtractFlavors(allPages) + if err != nil { + panic(err) + } + + for _, flavor := range allFlavors { + fmt.Printf("%+v\n", flavor) + } + +Example to Create a Flavor + + createOpts := flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: gophercloud.IntToPointer(1), + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + + flavor, err := flavors.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go new file mode 100644 index 00000000000..317fd8530c3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go @@ -0,0 +1,194 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToFlavorListQuery() (string, error) +} + +/* + AccessType maps to OpenStack's Flavor.is_public field. Although the is_public + field is boolean, the request options are ternary, which is why AccessType is + a string. The following values are allowed: + + The AccessType argument is optional, and if it is not supplied, OpenStack + returns the PublicAccess flavors. 
+*/ +type AccessType string + +const ( + // PublicAccess returns public flavors and private flavors associated with + // that project. + PublicAccess AccessType = "true" + + // PrivateAccess (admin only) returns private flavors, across all projects. + PrivateAccess AccessType = "false" + + // AllAccess (admin only) returns public and private flavors across all + // projects. + AllAccess AccessType = "None" +) + +/* + ListOpts filters the results returned by the ListDetail() function. + For example, a flavor with a minDisk field of 10 will not be returned if you + specify MinDisk set to 20. + + Typically, software will use the last ID of the previous call to ListDetail to + set the Marker for the current call. +*/ +type ListOpts struct { + + // ChangesSince, if provided, instructs List to return only those things which + // have changed since the timestamp provided. + ChangesSince string `q:"changes-since"` + + // MinDisk and MinRAM, if provided, elide flavors which do not meet your + // criteria. + MinDisk int `q:"minDisk"` + MinRAM int `q:"minRam"` + + // Marker and Limit control paging. + // Marker instructs List where to start listing from. + Marker string `q:"marker"` + + // Limit instructs List to refrain from sending excessively large lists of + // flavors. + Limit int `q:"limit"` + + // AccessType, if provided, instructs List which set of flavors to return. + // If not provided, flavors for the current project are returned. + AccessType AccessType `q:"is_public"` +} + +// ToFlavorListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToFlavorListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail instructs OpenStack to provide a list of flavors. +// You may provide criteria by which List curtails its results for easier +// processing. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToFlavorListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type CreateOptsBuilder interface { + ToFlavorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters used for creating a flavor. +type CreateOpts struct { + // Name is the name of the flavor. + Name string `json:"name" required:"true"` + + // RAM is the memory of the flavor, measured in MB. + RAM int `json:"ram" required:"true"` + + // VCPUs is the number of vcpus for the flavor. + VCPUs int `json:"vcpus" required:"true"` + + // Disk is the amount of root disk space, measured in GB. + Disk *int `json:"disk" required:"true"` + + // ID is a unique ID for the flavor. + ID string `json:"id,omitempty"` + + // Swap is the amount of swap space for the flavor, measured in MB. + Swap *int `json:"swap,omitempty"` + + // RxTxFactor alters the network bandwidth of a flavor. + RxTxFactor float64 `json:"rxtx_factor,omitempty"` + + // IsPublic flags a flavor as being available to all projects or not. + IsPublic *bool `json:"os-flavor-access:is_public,omitempty"` + + // Ephemeral is the amount of ephemeral disk space, measured in GB. + Ephemeral *int `json:"OS-FLV-EXT-DATA:ephemeral,omitempty"` +} + +// ToFlavorCreateMap constructs a request body from CreateOpts. 
+func (opts CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "flavor") +} + +// Create requests the creation of a new flavor. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFlavorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get retrieves details of a single flavor. Use Extract to convert its +// result into a Flavor. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified flavor ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// IDFromName is a convenience function that returns a flavor's ID given its +// name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + allPages, err := ListDetail(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractFlavors(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + err := &gophercloud.ErrResourceNotFound{} + err.ResourceType = "flavor" + err.Name = name + return "", err + case 1: + return id, nil + default: + err := &gophercloud.ErrMultipleResourcesFound{} + err.ResourceType = "flavor" + err.Name = name + err.Count = count + return "", err + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go new file mode 100644 index 00000000000..fda11d3e06a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go @@ -0,0 +1,133 @@ +package flavors + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +type CreateResult struct { + commonResult +} + +// GetResult is the response of a Get operation. Call its Extract method to +// interpret it as a Flavor. +type GetResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract provides access to the individual Flavor returned by the Get and +// Create functions. +func (r commonResult) Extract() (*Flavor, error) { + var s struct { + Flavor *Flavor `json:"flavor"` + } + err := r.ExtractInto(&s) + return s.Flavor, err +} + +// Flavor represents (virtual) hardware configurations for server resources +// in a region. +type Flavor struct { + // ID is the flavor's unique ID. + ID string `json:"id"` + + // Disk is the amount of root disk, measured in GB. + Disk int `json:"disk"` + + // RAM is the amount of memory, measured in MB. + RAM int `json:"ram"` + + // Name is the name of the flavor. + Name string `json:"name"` + + // RxTxFactor describes bandwidth alterations of the flavor. + RxTxFactor float64 `json:"rxtx_factor"` + + // Swap is the amount of swap space, measured in MB. 
+ Swap int `json:"swap"` + + // VCPUs indicates how many (virtual) CPUs are available for this flavor. + VCPUs int `json:"vcpus"` + + // IsPublic indicates whether the flavor is public. + IsPublic bool `json:"is_public"` +} + +func (r *Flavor) UnmarshalJSON(b []byte) error { + type tmp Flavor + var s struct { + tmp + Swap interface{} `json:"swap"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Flavor(s.tmp) + + switch t := s.Swap.(type) { + case float64: + r.Swap = int(t) + case string: + switch t { + case "": + r.Swap = 0 + default: + swap, err := strconv.ParseFloat(t, 64) + if err != nil { + return err + } + r.Swap = int(swap) + } + } + + return nil +} + +// FlavorPage contains a single page of all flavors from a ListDetails call. +type FlavorPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a FlavorPage contains any results. +func (page FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(page) + return len(flavors) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (page FlavorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"flavors_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractFlavors provides access to the list of flavors in a page acquired +// from the ListDetail operation. +func ExtractFlavors(r pagination.Page) ([]Flavor, error) { + var s struct { + Flavors []Flavor `json:"flavors"` + } + err := (r.(FlavorPage)).ExtractInto(&s) + return s.Flavors, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go new file mode 100644 index 00000000000..518d05b3699 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go @@ -0,0 +1,21 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" +) + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors", "detail") +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go new file mode 100644 index 00000000000..22410a79a27 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go @@ -0,0 +1,32 @@ +/* +Package images provides information and interaction with the images through +the OpenStack Compute service. + +This API is deprecated and will be removed from a future version of the Nova +API service. + +An image is a collection of files used to create or rebuild a server. +Operators provide a number of pre-built OS images by default. You may also +create custom images from cloud servers you have launched. 
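+
+Example to Get an Image by ID (a sketch, not from the upstream docs; the
+imageID is a placeholder and computeClient matches the example below)
+
+	imageID := "image-uuid"
+
+	image, err := images.Get(computeClient, imageID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", image)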
+ +Example to List Images + + listOpts := images.ListOpts{ + Limit: 2, + } + + allPages, err := images.ListDetail(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allImages, err := images.ExtractImages(allPages) + if err != nil { + panic(err) + } + + for _, image := range allImages { + fmt.Printf("%+v\n", image) + } +*/ +package images diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go new file mode 100644 index 00000000000..558b481b9e7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go @@ -0,0 +1,109 @@ +package images + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// ListDetail request. +type ListOptsBuilder interface { + ToImageListQuery() (string, error) +} + +// ListOpts contains options for filtering Images returned from a call to ListDetail. +type ListOpts struct { + // ChangesSince filters Images based on the last changed status (in date-time + // format). + ChangesSince string `q:"changes-since"` + + // Limit limits the number of Images to return. + Limit int `q:"limit"` + + // Marker is an Image UUID at which to set a marker. + Marker string `q:"marker"` + + // Name is the name of the Image. + Name string `q:"name"` + + // Server is the name of the Server (in URL format). + Server string `q:"server"` + + // Status is the current status of the Image. + Status string `q:"status"` + + // Type is the type of image (e.g. BASE, SERVER, ALL). + Type string `q:"type"` +} + +// ToImageListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToImageListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail enumerates the available images. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToImageListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ImagePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns data about a specific image by its ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified image ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// IDFromName is a convenience function that returns an image's ID given its +// name. 
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+	allPages, err := ListDetail(client, nil).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractImages(allPages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, f := range all {
+		if f.Name == name {
+			count++
+			id = f.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		err := &gophercloud.ErrResourceNotFound{}
+		err.ResourceType = "image"
+		err.Name = name
+		return "", err
+	case 1:
+		return id, nil
+	default:
+		err := &gophercloud.ErrMultipleResourcesFound{}
+		err.ResourceType = "image"
+		err.Name = name
+		err.Count = count
+		return "", err
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
new file mode 100644
index 00000000000..70d1018c721
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
@@ -0,0 +1,95 @@
+package images
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// GetResult is the response from a Get operation. Call its Extract method to
+// interpret it as an Image.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract interprets a GetResult as an Image.
+func (r GetResult) Extract() (*Image, error) {
+	var s struct {
+		Image *Image `json:"image"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Image, err
+}
+
+// Image represents an Image returned by the Compute API.
+type Image struct {
+	// ID is the unique ID of an image.
+	ID string
+
+	// Created is the date when the image was created.
+	Created string
+
+	// MinDisk is the minimum amount of disk a flavor must have to be able
+	// to create a server based on the image, measured in GB.
+	MinDisk int
+
+	// MinRAM is the minimum amount of RAM a flavor must have to be able
+	// to create a server based on the image, measured in MB.
+	MinRAM int
+
+	// Name provides a human-readable moniker for the OS image.
+	Name string
+
+	// The Progress and Status fields indicate image-creation status.
+	Progress int
+
+	// Status is the current status of the image.
+	Status string
+
+	// Updated is the date when the image was last updated.
+	Updated string
+
+	// Metadata provides free-form key/value pairs that further describe the
+	// image.
+	Metadata map[string]interface{}
+}
+
+// ImagePage contains a single page of all Images returned from a ListDetail
+// operation. Use ExtractImages to convert it into a slice of usable structs.
+type ImagePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if an ImagePage contains no Image results.
+func (page ImagePage) IsEmpty() (bool, error) {
+	images, err := ExtractImages(page)
+	return len(images) == 0, err
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
+func (page ImagePage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"images_links"`
+	}
+	err := page.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractImages converts a page of List results into a slice of usable Image
+// structs.
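+//
+// For example (a sketch, assuming allPages was produced by
+// ListDetail(...).AllPages()):
+//
+//	allImages, err := images.ExtractImages(allPages)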
+func ExtractImages(r pagination.Page) ([]Image, error) {
+	var s struct {
+		Images []Image `json:"images"`
+	}
+	err := (r.(ImagePage)).ExtractInto(&s)
+	return s.Images, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go
new file mode 100644
index 00000000000..57787fb725e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go
@@ -0,0 +1,15 @@
+package images
+
+import "github.com/gophercloud/gophercloud"
+
+func listDetailURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("images", "detail")
+}
+
+func getURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("images", id)
+}
+
+func deleteURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("images", id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
new file mode 100644
index 00000000000..3b0ab783626
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go
@@ -0,0 +1,115 @@
+/*
+Package servers provides information and interaction with the server API
+resource in the OpenStack Compute service.
+
+A server is a virtual machine instance in the compute system. In order for
+one to be provisioned, a valid flavor and image are required.
+
+Example to List Servers
+
+	listOpts := servers.ListOpts{
+		AllTenants: true,
+	}
+
+	allPages, err := servers.List(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServers, err := servers.ExtractServers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, server := range allServers {
+		fmt.Printf("%+v\n", server)
+	}
+
+Example to Create a Server
+
+	createOpts := servers.CreateOpts{
+		Name:      "server_name",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Server
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+	err := servers.Delete(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Force Delete a Server
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+	err := servers.ForceDelete(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Reboot a Server
+
+	rebootOpts := servers.RebootOpts{
+		Type: servers.SoftReboot,
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	err := servers.Reboot(computeClient, serverID, rebootOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Rebuild a Server
+
+	rebuildOpts := servers.RebuildOpts{
+		Name:    "new_name",
+		ImageID: "image-uuid",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	server, err := servers.Rebuild(computeClient, serverID, rebuildOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Resize a Server
+
+	resizeOpts := servers.ResizeOpts{
+		FlavorRef: "flavor-uuid",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	err := servers.Resize(computeClient, serverID, resizeOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+	err = servers.ConfirmResize(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Snapshot a Server
+
+	snapshotOpts := servers.CreateImageOpts{
+		Name: "snapshot_name",
+	}
+
serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + image, err := servers.CreateImage(computeClient, serverID, snapshotOpts).ExtractImageID() + if err != nil { + panic(err) + } +*/ +package servers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go new file mode 100644 index 00000000000..c9f0e3c20b5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go @@ -0,0 +1,71 @@ +package servers + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" +) + +// ErrNeitherImageIDNorImageNameProvided is the error when neither the image +// ID nor the image name is provided for a server operation +type ErrNeitherImageIDNorImageNameProvided struct{ gophercloud.ErrMissingInput } + +func (e ErrNeitherImageIDNorImageNameProvided) Error() string { + return "One and only one of the image ID and the image name must be provided." +} + +// ErrNeitherFlavorIDNorFlavorNameProvided is the error when neither the flavor +// ID nor the flavor name is provided for a server operation +type ErrNeitherFlavorIDNorFlavorNameProvided struct{ gophercloud.ErrMissingInput } + +func (e ErrNeitherFlavorIDNorFlavorNameProvided) Error() string { + return "One and only one of the flavor ID and the flavor name must be provided." +} + +type ErrNoClientProvidedForIDByName struct{ gophercloud.ErrMissingInput } + +func (e ErrNoClientProvidedForIDByName) Error() string { + return "A service client must be provided to find a resource ID by name." +} + +// ErrInvalidHowParameterProvided is the error when an unknown value is given +// for the `how` argument +type ErrInvalidHowParameterProvided struct{ gophercloud.ErrInvalidInput } + +// ErrNoAdminPassProvided is the error when an administrative password isn't +// provided for a server operation +type ErrNoAdminPassProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoImageIDProvided is the error when an image ID isn't provided for a server +// operation +type ErrNoImageIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoIDProvided is the error when a server ID isn't provided for a server +// operation +type ErrNoIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrServer is a generic error type for servers HTTP operations. +type ErrServer struct { + gophercloud.ErrUnexpectedResponseCode + ID string +} + +func (se ErrServer) Error() string { + return fmt.Sprintf("Error while executing HTTP request for server [%s]", se.ID) +} + +// Error404 overrides the generic 404 error message. +func (se ErrServer) Error404(e gophercloud.ErrUnexpectedResponseCode) error { + se.ErrUnexpectedResponseCode = e + return &ErrServerNotFound{se} +} + +// ErrServerNotFound is the error when a 404 is received during server HTTP +// operations. 
+type ErrServerNotFound struct { + ErrServer +} + +func (e ErrServerNotFound) Error() string { + return fmt.Sprintf("I couldn't find server [%s]", e.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go new file mode 100644 index 00000000000..c445f0ede0e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -0,0 +1,791 @@ +package servers + +import ( + "encoding/base64" + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToServerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +type ListOpts struct { + // ChangesSince is a time/date stamp for when the server last changed status. + ChangesSince string `q:"changes-since"` + + // Image is the name of the image in URL format. + Image string `q:"image"` + + // Flavor is the name of the flavor in URL format. + Flavor string `q:"flavor"` + + // Name of the server as a string; can be queried with regular expressions. + // Realize that ?name=bob returns both bob and bobb. If you need to match bob + // only, you can use a regular expression matching the syntax of the + // underlying database server implemented for Compute. + Name string `q:"name"` + + // Status is the value of the status of the server so that you can filter on + // "ACTIVE" for example. + Status string `q:"status"` + + // Host is the name of the host as a string. + Host string `q:"host"` + + // Marker is a UUID of the server at which you want to set a marker. + Marker string `q:"marker"` + + // Limit is an integer value for the limit of values to return. + Limit int `q:"limit"` + + // AllTenants is a bool to show all tenants. + AllTenants bool `q:"all_tenants"` + + // TenantID lists servers for a particular tenant. + // Setting "AllTenants = true" is required. + TenantID string `q:"tenant_id"` +} + +// ToServerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToServerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list servers accessible to you. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToServerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToServerCreateMap() (map[string]interface{}, error) +} + +// Network is used within CreateOpts to control a new server's network +// attachments. +type Network struct { + // UUID of a network to attach to the newly provisioned server. 
+ // Required unless Port is provided. + UUID string + + // Port of a neutron network to attach to the newly provisioned server. + // Required unless UUID is provided. + Port string + + // FixedIP specifies a fixed IPv4 address to be used on this network. + FixedIP string +} + +// Personality is an array of files that are injected into the server at launch. +type Personality []*File + +// File is used within CreateOpts and RebuildOpts to inject a file into the +// server at launch. +// File implements the json.Marshaler interface, so when a Create or Rebuild +// operation is requested, json.Marshal will call File's MarshalJSON method. +type File struct { + // Path of the file. + Path string + + // Contents of the file. Maximum content size is 255 bytes. + Contents []byte +} + +// MarshalJSON marshals the escaped file, base64 encoding the contents. +func (f *File) MarshalJSON() ([]byte, error) { + file := struct { + Path string `json:"path"` + Contents string `json:"contents"` + }{ + Path: f.Path, + Contents: base64.StdEncoding.EncodeToString(f.Contents), + } + return json.Marshal(file) +} + +// CreateOpts specifies server creation parameters. +type CreateOpts struct { + // Name is the name to assign to the newly launched server. + Name string `json:"name" required:"true"` + + // ImageRef [optional; required if ImageName is not provided] is the ID or + // full URL to the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageRef string `json:"imageRef"` + + // ImageName [optional; required if ImageRef is not provided] is the name of + // the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageName string `json:"-"` + + // FlavorRef [optional; required if FlavorName is not provided] is the ID or + // full URL to the flavor that describes the server's specs. + FlavorRef string `json:"flavorRef"` + + // FlavorName [optional; required if FlavorRef is not provided] is the name of + // the flavor that describes the server's specs. + FlavorName string `json:"-"` + + // SecurityGroups lists the names of the security groups to which this server + // should belong. + SecurityGroups []string `json:"-"` + + // UserData contains configuration information or scripts to use upon launch. + // Create will base64-encode it for you, if it isn't already. + UserData []byte `json:"-"` + + // AvailabilityZone in which to launch the server. + AvailabilityZone string `json:"availability_zone,omitempty"` + + // Networks dictates how this server will be attached to available networks. + // By default, the server will be attached to all isolated networks for the + // tenant. + Networks []Network `json:"-"` + + // Metadata contains key-value pairs (up to 255 bytes each) to attach to the + // server. + Metadata map[string]string `json:"metadata,omitempty"` + + // Personality includes files to inject into the server at launch. + // Create will base64-encode file contents for you. + Personality Personality `json:"personality,omitempty"` + + // ConfigDrive enables metadata injection through a configuration drive. + ConfigDrive *bool `json:"config_drive,omitempty"` + + // AdminPass sets the root user password. If not set, a randomly-generated + // password will be created and returned in the response. + AdminPass string `json:"adminPass,omitempty"` + + // AccessIPv4 specifies an IPv4 address for the instance. 
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 specifies an IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+
+	// ServiceClient will allow calls to be made to retrieve an image or
+	// flavor ID by name.
+	ServiceClient *gophercloud.ServiceClient `json:"-"`
+}
+
+// ToServerCreateMap assembles a request body based on the contents of a
+// CreateOpts.
+func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) {
+	sc := opts.ServiceClient
+	opts.ServiceClient = nil
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.UserData != nil {
+		var userData string
+		if _, err := base64.StdEncoding.DecodeString(string(opts.UserData)); err != nil {
+			userData = base64.StdEncoding.EncodeToString(opts.UserData)
+		} else {
+			userData = string(opts.UserData)
+		}
+		b["user_data"] = &userData
+	}
+
+	if len(opts.SecurityGroups) > 0 {
+		securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups))
+		for i, groupName := range opts.SecurityGroups {
+			securityGroups[i] = map[string]interface{}{"name": groupName}
+		}
+		b["security_groups"] = securityGroups
+	}
+
+	if len(opts.Networks) > 0 {
+		networks := make([]map[string]interface{}, len(opts.Networks))
+		for i, net := range opts.Networks {
+			networks[i] = make(map[string]interface{})
+			if net.UUID != "" {
+				networks[i]["uuid"] = net.UUID
+			}
+			if net.Port != "" {
+				networks[i]["port"] = net.Port
+			}
+			if net.FixedIP != "" {
+				networks[i]["fixed_ip"] = net.FixedIP
+			}
+		}
+		b["networks"] = networks
+	}
+
+	// If ImageRef isn't provided, check if ImageName was provided to ascertain
+	// the image ID.
+	if opts.ImageRef == "" {
+		if opts.ImageName != "" {
+			if sc == nil {
+				err := ErrNoClientProvidedForIDByName{}
+				err.Argument = "ServiceClient"
+				return nil, err
+			}
+			imageID, err := images.IDFromName(sc, opts.ImageName)
+			if err != nil {
+				return nil, err
+			}
+			b["imageRef"] = imageID
+		}
+	}
+
+	// If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID.
+	if opts.FlavorRef == "" {
+		if opts.FlavorName == "" {
+			err := ErrNeitherFlavorIDNorFlavorNameProvided{}
+			err.Argument = "FlavorRef/FlavorName"
+			return nil, err
+		}
+		if sc == nil {
+			err := ErrNoClientProvidedForIDByName{}
+			err.Argument = "ServiceClient"
+			return nil, err
+		}
+		flavorID, err := flavors.IDFromName(sc, opts.FlavorName)
+		if err != nil {
+			return nil, err
+		}
+		b["flavorRef"] = flavorID
+	}
+
+	return map[string]interface{}{"server": b}, nil
+}
+
+// Create requests a server to be provisioned to the user in the current tenant.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	reqBody, err := opts.ToServerCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(listURL(client), reqBody, &r.Body, nil)
+	return
+}
+
+// Delete requests that a server previously provisioned be removed from your
+// account.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// ForceDelete forces the deletion of a server.
+func ForceDelete(client *gophercloud.ServiceClient, id string) (r ActionResult) {
+	_, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"forceDelete": ""}, nil, nil)
+	return
+}
+
+// Get requests details on a single server, by ID.
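+//
+// Usage sketch (the client value and the ID are placeholders):
+//
+//	server, err := servers.Get(client, "server-uuid").Extract()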
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 203},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional attributes to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToServerUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// server.
+type UpdateOpts struct {
+	// Name changes the displayed name of the server.
+	// The server host name will *not* change.
+	// Server names are not constrained to be unique, even within the same tenant.
+	Name string `json:"name,omitempty"`
+
+	// AccessIPv4 provides a new IPv4 address for the instance.
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 provides a new IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+}
+
+// ToServerUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToServerUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "server")
+}
+
+// Update requests that various attributes of the indicated server be changed.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToServerUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ChangeAdminPassword alters the administrator or root password for a specified
+// server.
+func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) (r ActionResult) {
+	b := map[string]interface{}{
+		"changePassword": map[string]string{
+			"adminPass": newPassword,
+		},
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
+
+// RebootMethod describes the mechanisms by which a server reboot can be requested.
+type RebootMethod string
+
+// These constants determine how a server should be rebooted.
+// See the Reboot() function for further details.
+const (
+	SoftReboot RebootMethod = "SOFT"
+	HardReboot RebootMethod = "HARD"
+	OSReboot                = SoftReboot
+	PowerCycle              = HardReboot
+)
+
+// RebootOptsBuilder allows extensions to add additional parameters to the
+// reboot request.
+type RebootOptsBuilder interface {
+	ToServerRebootMap() (map[string]interface{}, error)
+}
+
+// RebootOpts provides options to the reboot request.
+type RebootOpts struct {
+	// Type is the type of reboot to perform on the server.
+	Type RebootMethod `json:"type" required:"true"`
+}
+
+// ToServerRebootMap builds a body for the reboot request.
+func (opts *RebootOpts) ToServerRebootMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "reboot")
+}
+
+/*
+	Reboot requests that a given server reboot.
+
+	Two methods exist for rebooting a server:
+
+	HardReboot (aka PowerCycle) starts the server instance by physically cutting
+	power to the machine, or if a VM, terminating it at the hypervisor level.
+	It's done. Caput. Full stop.
+	Then, after a brief while, power is restored or the VM instance restarted.
+
+	SoftReboot (aka OSReboot) simply tells the OS to restart under its own
+	procedure.
+	E.g., in Linux, asking it to enter runlevel 6, or executing
+	"sudo shutdown -r now", or by asking Windows to restart the machine.
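+
+	A minimal call sketch (the client value and serverID are placeholder
+	assumptions):
+
+	err := servers.Reboot(client, serverID, &servers.RebootOpts{
+		Type: servers.SoftReboot,
+	}).ExtractErr()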
+*/
+func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) {
+	b, err := opts.ToServerRebootMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
+
+// RebuildOptsBuilder allows extensions to provide additional parameters to the
+// rebuild request.
+type RebuildOptsBuilder interface {
+	ToServerRebuildMap() (map[string]interface{}, error)
+}
+
+// RebuildOpts represents the configuration options used in a server rebuild
+// operation.
+type RebuildOpts struct {
+	// AdminPass is the server's admin password.
+	AdminPass string `json:"adminPass,omitempty"`
+
+	// ImageID is the ID of the image you want your server to be provisioned on.
+	ImageID string `json:"imageRef"`
+
+	// ImageName is the readable name of an image.
+	ImageName string `json:"-"`
+
+	// Name to set the server to.
+	Name string `json:"name,omitempty"`
+
+	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+
+	// Metadata [optional] contains key-value pairs (up to 255 bytes each)
+	// to attach to the server.
+	Metadata map[string]string `json:"metadata,omitempty"`
+
+	// Personality [optional] includes files to inject into the server at launch.
+	// Rebuild will base64-encode file contents for you.
+	Personality Personality `json:"personality,omitempty"`
+
+	// ServiceClient will allow calls to be made to retrieve an image or
+	// flavor ID by name.
+	ServiceClient *gophercloud.ServiceClient `json:"-"`
+}
+
+// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON.
+func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	// If ImageID isn't provided, check if ImageName was provided to ascertain
+	// the image ID.
+	if opts.ImageID == "" {
+		if opts.ImageName != "" {
+			if opts.ServiceClient == nil {
+				err := ErrNoClientProvidedForIDByName{}
+				err.Argument = "ServiceClient"
+				return nil, err
+			}
+			imageID, err := images.IDFromName(opts.ServiceClient, opts.ImageName)
+			if err != nil {
+				return nil, err
+			}
+			b["imageRef"] = imageID
+		}
+	}
+
+	return map[string]interface{}{"rebuild": b}, nil
+}
+
+// Rebuild will reprovision the server according to the configuration options
+// provided in the RebuildOpts struct.
+func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) (r RebuildResult) {
+	b, err := opts.ToServerRebuildMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, &r.Body, nil)
+	return
+}
+
+// ResizeOptsBuilder allows extensions to add additional parameters to the
+// resize request.
+type ResizeOptsBuilder interface {
+	ToServerResizeMap() (map[string]interface{}, error)
+}
+
+// ResizeOpts represents the configuration options used to control a Resize
+// operation.
+type ResizeOpts struct {
+	// FlavorRef is the ID of the flavor you wish your server to become.
+	FlavorRef string `json:"flavorRef" required:"true"`
+}
+
+// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON
+// request body for the Resize request.
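+//
+// For example, ResizeOpts{FlavorRef: "flavor-uuid"} renders (roughly) as:
+//
+//	{"resize": {"flavorRef": "flavor-uuid"}}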
+func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "resize") +} + +// Resize instructs the provider to change the flavor of the server. +// +// Note that this implies rebuilding it. +// +// Unfortunately, one cannot pass rebuild parameters to the resize function. +// When the resize completes, the server will be in RESIZE_VERIFY state. +// While in this state, you can explore the use of the new server's +// configuration. If you like it, call ConfirmResize() to commit the resize +// permanently. Otherwise, call RevertResize() to restore the old configuration. +func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) { + b, err := opts.ToServerResizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, nil) + return +} + +// ConfirmResize confirms a previous resize operation on a server. +// See Resize() for more details. +func ConfirmResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"confirmResize": nil}, nil, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202, 204}, + }) + return +} + +// RevertResize cancels a previous resize operation on a server. +// See Resize() for more details. +func RevertResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"revertResize": nil}, nil, nil) + return +} + +// RescueOptsBuilder is an interface that allows extensions to override the +// default structure of a Rescue request. +type RescueOptsBuilder interface { + ToServerRescueMap() (map[string]interface{}, error) +} + +// RescueOpts represents the configuration options used to control a Rescue +// option. +type RescueOpts struct { + // AdminPass is the desired administrative password for the instance in + // RESCUE mode. If it's left blank, the server will generate a password. + AdminPass string `json:"adminPass,omitempty"` +} + +// ToServerRescueMap formats a RescueOpts as a map that can be used as a JSON +// request body for the Rescue request. +func (opts RescueOpts) ToServerRescueMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rescue") +} + +// Rescue instructs the provider to place the server into RESCUE mode. +func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder) (r RescueResult) { + b, err := opts.ToServerRescueMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ResetMetadataOptsBuilder allows extensions to add additional parameters to +// the Reset request. +type ResetMetadataOptsBuilder interface { + ToMetadataResetMap() (map[string]interface{}, error) +} + +// MetadataOpts is a map that contains key-value pairs. +type MetadataOpts map[string]string + +// ToMetadataResetMap assembles a body for a Reset request based on the contents +// of a MetadataOpts. +func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) { + return map[string]interface{}{"metadata": opts}, nil +} + +// ToMetadataUpdateMap assembles a body for an Update request based on the +// contents of a MetadataOpts. 
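+//
+// For example, MetadataOpts{"env": "prod"} becomes (roughly):
+//
+//	{"metadata": {"env": "prod"}}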
+func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"metadata": opts}, nil
+}
+
+// ResetMetadata will create multiple new key-value pairs for the given server
+// ID.
+// Note: Using this operation will erase any already-existing metadata and
+// create the new metadata provided. To keep any already-existing metadata,
+// use the UpdateMetadata function.
+func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) (r ResetMetadataResult) {
+	b, err := opts.ToMetadataResetMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Metadata requests all the metadata for the given server ID.
+func Metadata(client *gophercloud.ServiceClient, id string) (r GetMetadataResult) {
+	_, r.Err = client.Get(metadataURL(client, id), &r.Body, nil)
+	return
+}
+
+// UpdateMetadataOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateMetadataOptsBuilder interface {
+	ToMetadataUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMetadata updates (or creates) all the metadata specified by opts for
+// the given server ID. This operation does not affect already-existing metadata
+// that is not specified by opts.
+func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) {
+	b, err := opts.ToMetadataUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// MetadatumOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type MetadatumOptsBuilder interface {
+	ToMetadatumCreateMap() (map[string]interface{}, string, error)
+}
+
+// MetadatumOpts is a map of length one that contains a key-value pair.
+type MetadatumOpts map[string]string
+
+// ToMetadatumCreateMap assembles a body for a Create request based on the
+// contents of a MetadatumOpts.
+func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) {
+	if len(opts) != 1 {
+		err := gophercloud.ErrInvalidInput{}
+		err.Argument = "servers.MetadatumOpts"
+		err.Info = "Must have 1 and only 1 key-value pair"
+		return nil, "", err
+	}
+	metadatum := map[string]interface{}{"meta": opts}
+	var key string
+	for k := range metadatum["meta"].(MetadatumOpts) {
+		key = k
+	}
+	return metadatum, key, nil
+}
+
+// CreateMetadatum will create or update the key-value pair with the given key
+// for the given server ID.
+func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) (r CreateMetadatumResult) {
+	b, key, err := opts.ToMetadatumCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(metadatumURL(client, id, key), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Metadatum requests the key-value pair with the given key for the given
+// server ID.
+func Metadatum(client *gophercloud.ServiceClient, id, key string) (r GetMetadatumResult) {
+	_, r.Err = client.Get(metadatumURL(client, id, key), &r.Body, nil)
+	return
+}
+
+// DeleteMetadatum will delete the key-value pair with the given key for the
+// given server ID.
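+//
+// Usage sketch (client, serverID and the key are placeholders):
+//
+//	err := servers.DeleteMetadatum(client, serverID, "env").ExtractErr()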
+func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) (r DeleteMetadatumResult) {
+	_, r.Err = client.Delete(metadatumURL(client, id, key), nil)
+	return
+}
+
+// ListAddresses makes a request against the API to list the server's IP
+// addresses.
+func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager {
+	return pagination.NewPager(client, listAddressesURL(client, id), func(r pagination.PageResult) pagination.Page {
+		return AddressPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// ListAddressesByNetwork makes a request against the API to list the server's
+// IP addresses for the given network.
+func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager {
+	return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), func(r pagination.PageResult) pagination.Page {
+		return NetworkAddressPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateImageOptsBuilder allows extensions to add additional parameters to the
+// CreateImage request.
+type CreateImageOptsBuilder interface {
+	ToServerCreateImageMap() (map[string]interface{}, error)
+}
+
+// CreateImageOpts provides options to pass to the CreateImage request.
+type CreateImageOpts struct {
+	// Name of the image/snapshot.
+	Name string `json:"name" required:"true"`
+
+	// Metadata contains key-value pairs (up to 255 bytes each) to attach to
+	// the created image.
+	Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// ToServerCreateImageMap formats a CreateImageOpts structure into a request
+// body.
+func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "createImage")
+}
+
+// CreateImage makes a request against the nova API to schedule the creation of
+// an image of the server.
+func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageOptsBuilder) (r CreateImageResult) {
+	b, err := opts.ToServerCreateImageMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	resp, err := client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	r.Err = err
+	r.Header = resp.Header
+	return
+}
+
+// IDFromName is a convenience function that returns a server's ID given its
+// name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+	allPages, err := List(client, nil).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractServers(allPages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, f := range all {
+		if f.Name == name {
+			count++
+			id = f.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "server"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "server"}
+	}
+}
+
+// GetPassword makes a request against the nova API to get the encrypted
+// administrative password.
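+//
+// Usage sketch (client and serverId are placeholders); passing nil to
+// ExtractPassword returns the password still encrypted:
+//
+//	pwd, err := servers.GetPassword(client, serverId).ExtractPassword(nil)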
+func GetPassword(client *gophercloud.ServiceClient, serverId string) (r GetPasswordResult) {
+	_, r.Err = client.Get(passwordURL(client, serverId), &r.Body, nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
new file mode 100644
index 00000000000..7ef80e92e2e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
@@ -0,0 +1,404 @@
+package servers
+
+import (
+	"crypto/rsa"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"path"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type serverResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any serverResult as a Server, if possible.
+func (r serverResult) Extract() (*Server, error) {
+	var s Server
+	err := r.ExtractInto(&s)
+	return &s, err
+}
+
+func (r serverResult) ExtractInto(v interface{}) error {
+	return r.Result.ExtractIntoStructPtr(v, "server")
+}
+
+// ExtractServersInto extracts a slice of Servers from a page of List results
+// into the value pointed to by v.
+func ExtractServersInto(r pagination.Page, v interface{}) error {
+	return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers")
+}
+
+// CreateResult is the response from a Create operation. Call its Extract
+// method to interpret it as a Server.
+type CreateResult struct {
+	serverResult
+}
+
+// GetResult is the response from a Get operation. Call its Extract
+// method to interpret it as a Server.
+type GetResult struct {
+	serverResult
+}
+
+// UpdateResult is the response from an Update operation. Call its Extract
+// method to interpret it as a Server.
+type UpdateResult struct {
+	serverResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// RebuildResult is the response from a Rebuild operation. Call its Extract
+// method to interpret it as a Server.
+type RebuildResult struct {
+	serverResult
+}
+
+// ActionResult represents the result of server action operations, like reboot.
+// Call its ExtractErr method to determine if the action succeeded or failed.
+type ActionResult struct {
+	gophercloud.ErrResult
+}
+
+// RescueResult is the response from a Rescue operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type RescueResult struct {
+	ActionResult
+}
+
+// CreateImageResult is the response from a CreateImage operation. Call its
+// ExtractImageID method to retrieve the ID of the newly created image.
+type CreateImageResult struct {
+	gophercloud.Result
+}
+
+// GetPasswordResult represents the result of a get os-server-password
+// operation. Call its ExtractPassword method to retrieve the password.
+type GetPasswordResult struct {
+	gophercloud.Result
+}
+
+// ExtractPassword gets the encrypted password.
+// If privateKey != nil the password is decrypted with the private key.
+// If privateKey == nil the encrypted password is returned and can be decrypted
+// with:
+//
+//	echo '<encrypted_password>' | base64 -D | openssl rsautl -decrypt -inkey <private_key_file>
+func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) {
+	var s struct {
+		Password string `json:"password"`
+	}
+	err := r.ExtractInto(&s)
+	if err == nil && privateKey != nil && s.Password != "" {
+		return decryptPassword(s.Password, privateKey)
+	}
+	return s.Password, err
+}
+
+func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (string, error) {
+	b64EncryptedPassword := make([]byte, base64.StdEncoding.DecodedLen(len(encryptedPassword)))
+
+	n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword))
+	if err != nil {
+		return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err)
+	}
+	password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n])
+	if err != nil {
+		return "", fmt.Errorf("Failed to decrypt password: %s", err)
+	}
+
+	return string(password), nil
+}
+
+// ExtractImageID gets the ID of the newly created server image from the header.
+func (r CreateImageResult) ExtractImageID() (string, error) {
+	if r.Err != nil {
+		return "", r.Err
+	}
+	// Get the image id from the header
+	u, err := url.ParseRequestURI(r.Header.Get("Location"))
+	if err != nil {
+		return "", err
+	}
+	imageID := path.Base(u.Path)
+	if imageID == "." || imageID == "/" {
+		return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u)
+	}
+	return imageID, nil
+}
+
+// Extract interprets any RescueResult as an AdminPass, if possible.
+func (r RescueResult) Extract() (string, error) {
+	var s struct {
+		AdminPass string `json:"adminPass"`
+	}
+	err := r.ExtractInto(&s)
+	return s.AdminPass, err
+}
+
+// Server represents a server/instance in the OpenStack cloud.
+type Server struct {
+	// ID uniquely identifies this server amongst all other servers,
+	// including those not accessible to the current tenant.
+	ID string `json:"id"`
+
+	// TenantID identifies the tenant owning this server resource.
+	TenantID string `json:"tenant_id"`
+
+	// UserID uniquely identifies the user account owning the tenant.
+	UserID string `json:"user_id"`
+
+	// Name contains the human-readable name for the server.
+	Name string `json:"name"`
+
+	// Updated and Created contain ISO-8601 timestamps of when the state of the
+	// server last changed, and when it was created.
+	Updated time.Time `json:"updated"`
+	Created time.Time `json:"created"`
+
+	// HostID is the host where the server is located in the cloud.
+	HostID string `json:"hostid"`
+
+	// Status contains the current operational status of the server,
+	// such as IN_PROGRESS or ACTIVE.
+	Status string `json:"status"`
+
+	// Progress ranges from 0..100.
+	// A request made against the server completes only once Progress reaches 100.
+	Progress int `json:"progress"`
+
+	// AccessIPv4 and AccessIPv6 contain the IP addresses of the server,
+	// suitable for remote access for administration.
+	AccessIPv4 string `json:"accessIPv4"`
+	AccessIPv6 string `json:"accessIPv6"`
+
+	// Image refers to a JSON object, which itself indicates the OS image used to
+	// deploy the server.
+	Image map[string]interface{} `json:"-"`
+
+	// Flavor refers to a JSON object, which itself indicates the hardware
+	// configuration of the deployed server.
+	Flavor map[string]interface{} `json:"flavor"`
+
+	// Addresses includes a list of all IP addresses assigned to the server,
+	// keyed by pool.
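+	// A typical value looks like {"pool-name": [{"addr": "10.0.0.4", "version": 4}]}
+	// (a sketch; pool names vary by deployment).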
+	Addresses map[string]interface{} `json:"addresses"`
+
+	// Metadata includes a list of all user-specified key-value pairs attached
+	// to the server.
+	Metadata map[string]string `json:"metadata"`
+
+	// Links includes HTTP references to the server itself, useful for passing
+	// along to other APIs that might want a server reference.
+	Links []interface{} `json:"links"`
+
+	// KeyName indicates which public key was injected into the server on launch.
+	KeyName string `json:"key_name"`
+
+	// AdminPass will generally be empty (""). However, it will contain the
+	// administrative password chosen when provisioning a new server without a
+	// set AdminPass setting in the first place.
+	// Note that this is the ONLY time this field will be valid.
+	AdminPass string `json:"adminPass"`
+
+	// SecurityGroups includes the security groups that this instance has applied
+	// to it.
+	SecurityGroups []map[string]interface{} `json:"security_groups"`
+}
+
+func (r *Server) UnmarshalJSON(b []byte) error {
+	type tmp Server
+	var s struct {
+		tmp
+		Image interface{} `json:"image"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = Server(s.tmp)
+
+	switch t := s.Image.(type) {
+	case map[string]interface{}:
+		r.Image = t
+	case string:
+		switch t {
+		case "":
+			r.Image = nil
+		}
+	}
+
+	return err
+}
+
+// ServerPage abstracts the raw results of making a List() request against
+// the API. As OpenStack extensions may freely alter the response bodies of
+// structures returned to the client, you may only safely access the data
+// provided through the ExtractServers call.
+type ServerPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if a page contains no Server results.
+func (r ServerPage) IsEmpty() (bool, error) {
+	s, err := ExtractServers(r)
+	return len(s) == 0, err
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
+func (r ServerPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"servers_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractServers interprets the results of a single page from a List() call,
+// producing a slice of Server entities.
+func ExtractServers(r pagination.Page) ([]Server, error) {
+	var s []Server
+	err := ExtractServersInto(r, &s)
+	return s, err
+}
+
+// MetadataResult contains the result of a call for (potentially) multiple
+// key-value pairs. Call its Extract method to interpret it as a
+// map[string]interface.
+type MetadataResult struct {
+	gophercloud.Result
+}
+
+// GetMetadataResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]interface.
+type GetMetadataResult struct {
+	MetadataResult
+}
+
+// ResetMetadataResult contains the result of a Reset operation. Call its
+// Extract method to interpret it as a map[string]interface.
+type ResetMetadataResult struct {
+	MetadataResult
+}
+
+// UpdateMetadataResult contains the result of an Update operation. Call its
+// Extract method to interpret it as a map[string]interface.
+type UpdateMetadataResult struct {
+	MetadataResult
+}
+
+// MetadatumResult contains the result of a call for a single key-value pair.
+type MetadatumResult struct {
+	gophercloud.Result
+}
+
+// GetMetadatumResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]interface.
+type GetMetadatumResult struct { + MetadatumResult +} + +// CreateMetadatumResult contains the result of a Create operation. Call its +// Extract method to interpret it as a map[string]interface. +type CreateMetadatumResult struct { + MetadatumResult +} + +// DeleteMetadatumResult contains the result of a Delete operation. Call its +// ExtractErr method to determine if the call succeeded or failed. +type DeleteMetadatumResult struct { + gophercloud.ErrResult +} + +// Extract interprets any MetadataResult as a Metadata, if possible. +func (r MetadataResult) Extract() (map[string]string, error) { + var s struct { + Metadata map[string]string `json:"metadata"` + } + err := r.ExtractInto(&s) + return s.Metadata, err +} + +// Extract interprets any MetadatumResult as a Metadatum, if possible. +func (r MetadatumResult) Extract() (map[string]string, error) { + var s struct { + Metadatum map[string]string `json:"meta"` + } + err := r.ExtractInto(&s) + return s.Metadatum, err +} + +// Address represents an IP address. +type Address struct { + Version int `json:"version"` + Address string `json:"addr"` +} + +// AddressPage abstracts the raw results of making a ListAddresses() request +// against the API. As OpenStack extensions may freely alter the response bodies +// of structures returned to the client, you may only safely access the data +// provided through the ExtractAddresses call. +type AddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if an AddressPage contains no networks. +func (r AddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractAddresses(r) + return len(addresses) == 0, err +} + +// ExtractAddresses interprets the results of a single page from a +// ListAddresses() call, producing a map of addresses. +func ExtractAddresses(r pagination.Page) (map[string][]Address, error) { + var s struct { + Addresses map[string][]Address `json:"addresses"` + } + err := (r.(AddressPage)).ExtractInto(&s) + return s.Addresses, err +} + +// NetworkAddressPage abstracts the raw results of making a +// ListAddressesByNetwork() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures +// returned to the client, you may only safely access the data provided through +// the ExtractAddresses call. +type NetworkAddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NetworkAddressPage contains no addresses. +func (r NetworkAddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractNetworkAddresses(r) + return len(addresses) == 0, err +} + +// ExtractNetworkAddresses interprets the results of a single page from a +// ListAddressesByNetwork() call, producing a slice of addresses. 
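+//
+// Usage sketch (client, serverID and the network name are placeholders):
+//
+//	allPages, err := servers.ListAddressesByNetwork(client, serverID, "private").AllPages()
+//	if err == nil {
+//		addrs, _ := servers.ExtractNetworkAddresses(allPages)
+//		fmt.Println(addrs)
+//	}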
+func ExtractNetworkAddresses(r pagination.Page) ([]Address, error) { + var s map[string][]Address + err := (r.(NetworkAddressPage)).ExtractInto(&s) + if err != nil { + return nil, err + } + + var key string + for k := range s { + key = k + } + + return s[key], err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go new file mode 100644 index 00000000000..e892e8d9259 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go @@ -0,0 +1,51 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers") +} + +func listURL(client *gophercloud.ServiceClient) string { + return createURL(client) +} + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers", "detail") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("servers", id, "metadata", key) +} + +func metadataURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "metadata") +} + +func listAddressesURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "ips") +} + +func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { + return client.ServiceURL("servers", id, "ips", network) +} + +func passwordURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "os-server-password") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go new file mode 100644 index 00000000000..cadef054506 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go @@ -0,0 +1,21 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +// WaitForStatus will continually poll a server until it successfully +// transitions to a specified status. It will do this for at most the number +// of seconds specified. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go new file mode 100644 index 00000000000..617fafa6314 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go @@ -0,0 +1,54 @@ +/* +Package recordsets provides information and interaction with the zone API +resource for the OpenStack DNS service. 
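+
+Example to Get a RecordSet (a minimal sketch; the IDs are placeholders)
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+	recordsetID := "d96ed01a-b439-4eb8-9b90-7a9f71017f7b"
+
+	rr, err := recordsets.Get(dnsClient, zoneID, recordsetID).Extract()
+	if err != nil {
+		panic(err)
+	}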
+
+Example to List RecordSets by Zone
+
+	listOpts := recordsets.ListOpts{
+		Type: "A",
+	}
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+
+	allPages, err := recordsets.ListByZone(dnsClient, zoneID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRRs, err := recordsets.ExtractRecordSets(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, rr := range allRRs {
+		fmt.Printf("%+v\n", rr)
+	}
+
+Example to Create a RecordSet
+
+	createOpts := recordsets.CreateOpts{
+		Name:        "example.com.",
+		Type:        "A",
+		TTL:         3600,
+		Description: "This is a recordset.",
+		Records:     []string{"10.1.0.2"},
+	}
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+
+	rr, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a RecordSet
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+	recordsetID := "d96ed01a-b439-4eb8-9b90-7a9f71017f7b"
+
+	err := recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package recordsets
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go
new file mode 100644
index 00000000000..2d6ecdc3dcf
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go
@@ -0,0 +1,166 @@
+package recordsets
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToRecordSetListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the recordset attributes you want to see returned. Marker and Limit are used
+// for pagination.
+// https://developer.openstack.org/api-ref/dns/
+type ListOpts struct {
+	// Integer value for the limit of values to return.
+	Limit int `q:"limit"`
+
+	// UUID of the recordset at which you want to set a marker.
+	Marker string `q:"marker"`
+
+	Data        string `q:"data"`
+	Description string `q:"description"`
+	Name        string `q:"name"`
+	SortDir     string `q:"sort_dir"`
+	SortKey     string `q:"sort_key"`
+	Status      string `q:"status"`
+	TTL         int    `q:"ttl"`
+	Type        string `q:"type"`
+	ZoneID      string `q:"zone_id"`
+}
+
+// ToRecordSetListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToRecordSetListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListByZone implements the recordset list request.
+func ListByZone(client *gophercloud.ServiceClient, zoneID string, opts ListOptsBuilder) pagination.Pager {
+	url := baseURL(client, zoneID)
+	if opts != nil {
+		query, err := opts.ToRecordSetListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return RecordSetPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get implements the recordset Get request.
+func Get(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r GetResult) {
+	_, r.Err = client.Get(rrsetURL(client, zoneID, rrsetID), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional attributes to the
+// Create request.
+type CreateOptsBuilder interface { + ToRecordSetCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies the base attributes that may be used to create a +// RecordSet. +type CreateOpts struct { + // Name is the name of the RecordSet. + Name string `json:"name" required:"true"` + + // Description is a description of the RecordSet. + Description string `json:"description,omitempty"` + + // Records are the DNS records of the RecordSet. + Records []string `json:"records,omitempty"` + + // TTL is the time to live of the RecordSet. + TTL int `json:"ttl,omitempty"` + + // Type is the RRTYPE of the RecordSet. + Type string `json:"type,omitempty"` +} + +// ToRecordSetCreateMap formats an CreateOpts structure into a request body. +func (opts CreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + return b, nil +} + +// Create creates a recordset in a given zone. +func Create(client *gophercloud.ServiceClient, zoneID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRecordSetCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. +type UpdateOptsBuilder interface { + ToRecordSetUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an existing +// RecordSet. +type UpdateOpts struct { + // Description is a description of the RecordSet. + Description string `json:"description,omitempty"` + + // TTL is the time to live of the RecordSet. + TTL int `json:"ttl,omitempty"` + + // Records are the DNS records of the RecordSet. + Records []string `json:"records,omitempty"` +} + +// ToRecordSetUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToRecordSetUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.TTL > 0 { + b["ttl"] = opts.TTL + } else { + b["ttl"] = nil + } + + return b, nil +} + +// Update updates a recordset in a given zone +func Update(client *gophercloud.ServiceClient, zoneID string, rrsetID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRecordSetUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(rrsetURL(client, zoneID, rrsetID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete removes an existing RecordSet. 
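+//
+// Usage sketch (dnsClient and the IDs are placeholders):
+//
+//	err := recordsets.Delete(dnsClient, zoneID, rrsetID).ExtractErr()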
+func Delete(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r DeleteResult) { + _, r.Err = client.Delete(rrsetURL(client, zoneID, rrsetID), &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go new file mode 100644 index 00000000000..0fdc1fe52ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go @@ -0,0 +1,147 @@ +package recordsets + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a RecordSet. +// An error is returned if the original call or the extraction failed. +func (r commonResult) Extract() (*RecordSet, error) { + var s *RecordSet + err := r.ExtractInto(&s) + return s, err +} + +// CreateResult is the result of a Create operation. Call its Extract method to +// interpret the result as a RecordSet. +type CreateResult struct { + commonResult +} + +// GetResult is the result of a Get operation. Call its Extract method to +// interpret the result as a RecordSet. +type GetResult struct { + commonResult +} + +// RecordSetPage is a single page of RecordSet results. +type RecordSetPage struct { + pagination.LinkedPageBase +} + +// UpdateResult is result of an Update operation. Call its Extract method to +// interpret the result as a RecordSet. +type UpdateResult struct { + commonResult +} + +// DeleteResult is result of a Delete operation. Call its ExtractErr method to +// determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// IsEmpty returns true if the page contains no results. +func (r RecordSetPage) IsEmpty() (bool, error) { + s, err := ExtractRecordSets(r) + return len(s) == 0, err +} + +// ExtractRecordSets extracts a slice of RecordSets from a List result. +func ExtractRecordSets(r pagination.Page) ([]RecordSet, error) { + var s struct { + RecordSets []RecordSet `json:"recordsets"` + } + err := (r.(RecordSetPage)).ExtractInto(&s) + return s.RecordSets, err +} + +// RecordSet represents a DNS Record Set. +type RecordSet struct { + // ID is the unique ID of the recordset + ID string `json:"id"` + + // ZoneID is the ID of the zone the recordset belongs to. + ZoneID string `json:"zone_id"` + + // ProjectID is the ID of the project that owns the recordset. + ProjectID string `json:"project_id"` + + // Name is the name of the recordset. + Name string `json:"name"` + + // ZoneName is the name of the zone the recordset belongs to. + ZoneName string `json:"zone_name"` + + // Type is the RRTYPE of the recordset. + Type string `json:"type"` + + // Records are the DNS records of the recordset. + Records []string `json:"records"` + + // TTL is the time to live of the recordset. + TTL int `json:"ttl"` + + // Status is the status of the recordset. + Status string `json:"status"` + + // Action is the current action in progress of the recordset. + Action string `json:"action"` + + // Description is the description of the recordset. + Description string `json:"description"` + + // Version is the revision of the recordset. + Version int `json:"version"` + + // CreatedAt is the date when the recordset was created. 
+ CreatedAt time.Time `json:"-"` + + // UpdatedAt is the date when the recordset was updated. + UpdatedAt time.Time `json:"-"` + + // Links includes HTTP references to the itself, + // useful for passing along to other APIs that might want a recordset + // reference. + Links []gophercloud.Link `json:"-"` +} + +func (r *RecordSet) UnmarshalJSON(b []byte) error { + type tmp RecordSet + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + Links map[string]interface{} `json:"links"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = RecordSet(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + if s.Links != nil { + for rel, href := range s.Links { + if v, ok := href.(string); ok { + link := gophercloud.Link{ + Rel: rel, + Href: v, + } + r.Links = append(r.Links, link) + } + } + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go new file mode 100644 index 00000000000..5ec18d1bb78 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go @@ -0,0 +1,11 @@ +package recordsets + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, zoneID string) string { + return c.ServiceURL("zones", zoneID, "recordsets") +} + +func rrsetURL(c *gophercloud.ServiceClient, zoneID string, rrsetID string) string { + return c.ServiceURL("zones", zoneID, "recordsets", rrsetID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go new file mode 100644 index 00000000000..7733155bcfa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go @@ -0,0 +1,48 @@ +/* +Package zones provides information and interaction with the zone API +resource for the OpenStack DNS service. + +Example to List Zones + + listOpts := zones.ListOpts{ + Email: "jdoe@example.com", + } + + allPages, err := zones.List(dnsClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allZones, err := zones.ExtractZones(allPages) + if err != nil { + panic(err) + } + + for _, zone := range allZones { + fmt.Printf("%+v\n", zone) + } + +Example to Create a Zone + + createOpts := zones.CreateOpts{ + Name: "example.com.", + Email: "jdoe@example.com", + Type: "PRIMARY", + TTL: 7200, + Description: "This is a zone.", + } + + zone, err := zones.Create(dnsClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Zone + + zoneID := "99d10f68-5623-4491-91a0-6daafa32b60e" + err := zones.Delete(dnsClient, zoneID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package zones diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go new file mode 100644 index 00000000000..f87deadce74 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go @@ -0,0 +1,174 @@ +package zones + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add parameters to the List request. 
+type ListOptsBuilder interface { + ToZoneListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +// https://developer.openstack.org/api-ref/dns/ +type ListOpts struct { + // Integer value for the limit of values to return. + Limit int `q:"limit"` + + // UUID of the zone at which you want to set a marker. + Marker string `q:"marker"` + + Description string `q:"description"` + Email string `q:"email"` + Name string `q:"name"` + SortDir string `q:"sort_dir"` + SortKey string `q:"sort_key"` + Status string `q:"status"` + TTL int `q:"ttl"` + Type string `q:"type"` +} + +// ToZoneListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToZoneListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List implements a zone List request. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := baseURL(client) + if opts != nil { + query, err := opts.ToZoneListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ZonePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns information about a zone, given its ID. +func Get(client *gophercloud.ServiceClient, zoneID string) (r GetResult) { + _, r.Err = client.Get(zoneURL(client, zoneID), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional attributes to the +// Create request. +type CreateOptsBuilder interface { + ToZoneCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies the attributes used to create a zone. +type CreateOpts struct { + // Attributes are settings that supply hints and filters for the zone. + Attributes map[string]string `json:"attributes,omitempty"` + + // Email contact of the zone. + Email string `json:"email,omitempty"` + + // Description of the zone. + Description string `json:"description,omitempty"` + + // Name of the zone. + Name string `json:"name" required:"true"` + + // Masters specifies zone masters if this is a secondary zone. + Masters []string `json:"masters,omitempty"` + + // TTL is the time to live of the zone. + TTL int `json:"-"` + + // Type specifies if this is a primary or secondary zone. + Type string `json:"type,omitempty"` +} + +// ToZoneCreateMap formats an CreateOpts structure into a request body. +func (opts CreateOpts) ToZoneCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.TTL > 0 { + b["ttl"] = opts.TTL + } + + return b, nil +} + +// Create implements a zone create request. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToZoneCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. +type UpdateOptsBuilder interface { + ToZoneUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the attributes to update a zone. +type UpdateOpts struct { + // Email contact of the zone. 
+ Email string `json:"email,omitempty"` + + // TTL is the time to live of the zone. + TTL int `json:"-"` + + // Masters specifies zone masters if this is a secondary zone. + Masters []string `json:"masters,omitempty"` + + // Description of the zone. + Description string `json:"description,omitempty"` +} + +// ToZoneUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToZoneUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.TTL > 0 { + b["ttl"] = opts.TTL + } + + return b, nil +} + +// Update implements a zone update request. +func Update(client *gophercloud.ServiceClient, zoneID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToZoneUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(zoneURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete implements a zone delete request. +func Delete(client *gophercloud.ServiceClient, zoneID string) (r DeleteResult) { + _, r.Err = client.Delete(zoneURL(client, zoneID), &gophercloud.RequestOpts{ + OkCodes: []int{202}, + JSONResponse: &r.Body, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go new file mode 100644 index 00000000000..a36eca7e205 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go @@ -0,0 +1,166 @@ +package zones + +import ( + "encoding/json" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a Zone. +// An error is returned if the original call or the extraction failed. +func (r commonResult) Extract() (*Zone, error) { + var s *Zone + err := r.ExtractInto(&s) + return s, err +} + +// CreateResult is the result of a Create request. Call its Extract method +// to interpret the result as a Zone. +type CreateResult struct { + commonResult +} + +// GetResult is the result of a Get request. Call its Extract method +// to interpret the result as a Zone. +type GetResult struct { + commonResult +} + +// UpdateResult is the result of an Update request. Call its Extract method +// to interpret the result as a Zone. +type UpdateResult struct { + commonResult +} + +// DeleteResult is the result of a Delete request. Call its ExtractErr method +// to determine if the request succeeded or failed. +type DeleteResult struct { + commonResult +} + +// ZonePage is a single page of Zone results. +type ZonePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if the page contains no results. +func (r ZonePage) IsEmpty() (bool, error) { + s, err := ExtractZones(r) + return len(s) == 0, err +} + +// ExtractZones extracts a slice of Zones from a List result. +func ExtractZones(r pagination.Page) ([]Zone, error) { + var s struct { + Zones []Zone `json:"zones"` + } + err := (r.(ZonePage)).ExtractInto(&s) + return s.Zones, err +} + +// Zone represents a DNS zone. +type Zone struct { + // ID uniquely identifies this zone amongst all other zones, including those + // not accessible to the current tenant. + ID string `json:"id"` + + // PoolID is the ID for the pool hosting this zone. 
+ PoolID string `json:"pool_id"` + + // ProjectID identifies the project/tenant owning this resource. + ProjectID string `json:"project_id"` + + // Name is the DNS Name for the zone. + Name string `json:"name"` + + // Email for the zone. Used in SOA records for the zone. + Email string `json:"email"` + + // Description for this zone. + Description string `json:"description"` + + // TTL is the Time to Live for the zone. + TTL int `json:"ttl"` + + // Serial is the current serial number for the zone. + Serial int `json:"-"` + + // Status is the status of the resource. + Status string `json:"status"` + + // Action is the current action in progress on the resource. + Action string `json:"action"` + + // Version of the resource. + Version int `json:"version"` + + // Attributes for the zone. + Attributes map[string]string `json:"attributes"` + + // Type of zone. Primary is controlled by Designate. + // Secondary zones are slaved from another DNS Server. + // Defaults to Primary. + Type string `json:"type"` + + // Masters is the servers for slave servers to get DNS information from. + Masters []string `json:"masters"` + + // CreatedAt is the date when the zone was created. + CreatedAt time.Time `json:"-"` + + // UpdatedAt is the date when the last change was made to the zone. + UpdatedAt time.Time `json:"-"` + + // TransferredAt is the last time an update was retrieved from the + // master servers. + TransferredAt time.Time `json:"-"` + + // Links includes HTTP references to the itself, useful for passing along + // to other APIs that might want a server reference. + Links map[string]interface{} `json:"links"` +} + +func (r *Zone) UnmarshalJSON(b []byte) error { + type tmp Zone + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + TransferredAt gophercloud.JSONRFC3339MilliNoZ `json:"transferred_at"` + Serial interface{} `json:"serial"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Zone(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.TransferredAt = time.Time(s.TransferredAt) + + switch t := s.Serial.(type) { + case float64: + r.Serial = int(t) + case string: + switch t { + case "": + r.Serial = 0 + default: + serial, err := strconv.ParseFloat(t, 64) + if err != nil { + return err + } + r.Serial = int(serial) + } + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go new file mode 100644 index 00000000000..9bef7058096 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go @@ -0,0 +1,11 @@ +package zones + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("zones") +} + +func zoneURL(c *gophercloud.ServiceClient, zoneID string) string { + return c.ServiceURL("zones", zoneID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go new file mode 100644 index 00000000000..cedf1f4d3a3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go @@ -0,0 +1,14 @@ +/* +Package openstack contains resources for the individual OpenStack projects +supported in Gophercloud. It also includes functions to authenticate to an +OpenStack cloud and for provisioning various service-level clients. 
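+
+Example of Authenticating with Explicit Credentials (a sketch; the endpoint,
+credentials, and tenant ID below are placeholders)
+
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		Username:         "user",
+		Password:         "pass",
+		TenantID:         "fc394f2ab2df4114bde39905f800dc57",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)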
+
+Example of Creating a Service Client
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+*/
+package openstack
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
new file mode 100644
index 00000000000..070ea7cbef0
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
@@ -0,0 +1,107 @@
+package openstack
+
+import (
+	"github.com/gophercloud/gophercloud"
+	tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
+	tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
+)
+
+/*
+V2EndpointURL discovers the endpoint URL for a specific service from a
+ServiceCatalog acquired during the v2 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
+func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
+	// Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided.
+	var endpoints = make([]tokens2.Endpoint, 0, 1)
+	for _, entry := range catalog.Entries {
+		if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) {
+			for _, endpoint := range entry.Endpoints {
+				if opts.Region == "" || endpoint.Region == opts.Region {
+					endpoints = append(endpoints, endpoint)
+				}
+			}
+		}
+	}
+
+	// Report an error if the options were ambiguous.
+	if len(endpoints) > 1 {
+		err := &ErrMultipleMatchingEndpointsV2{}
+		err.Endpoints = endpoints
+		return "", err
+	}
+
+	// Extract the appropriate URL from the matching Endpoint.
+	for _, endpoint := range endpoints {
+		switch opts.Availability {
+		case gophercloud.AvailabilityPublic:
+			return gophercloud.NormalizeURL(endpoint.PublicURL), nil
+		case gophercloud.AvailabilityInternal:
+			return gophercloud.NormalizeURL(endpoint.InternalURL), nil
+		case gophercloud.AvailabilityAdmin:
+			return gophercloud.NormalizeURL(endpoint.AdminURL), nil
+		default:
+			err := &ErrInvalidAvailabilityProvided{}
+			err.Argument = "Availability"
+			err.Value = opts.Availability
+			return "", err
+		}
+	}
+
+	// Report an error if there were no matching endpoints.
+	err := &gophercloud.ErrEndpointNotFound{}
+	return "", err
+}
+
+/*
+V3EndpointURL discovers the endpoint URL for a specific service from a Catalog
+acquired during the v3 identity service.
+
+The specified EndpointOpts are used to identify a unique, unambiguous endpoint
+to return. It's an error both when multiple endpoints match the provided
+criteria and when none do. The minimum that can be specified is a Type, but you
+will also often need to specify a Name and/or a Region depending on what's
+available on your OpenStack deployment.
+*/
+func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) {
+	// Extract Endpoints from the catalog entries that match the requested Type, Interface,
+	// Name if provided, and Region if provided.
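+	// Note: an Availability value other than admin, public, or internal is
+	// rejected below as soon as the first candidate endpoint is inspected.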
+ var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region) { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} + } + + // Extract the URL from the matching Endpoint. + for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go new file mode 100644 index 00000000000..df410b1c611 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go @@ -0,0 +1,71 @@ +package openstack + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +// ErrEndpointNotFound is the error when no suitable endpoint can be found +// in the user's catalog +type ErrEndpointNotFound struct{ gophercloud.BaseError } + +func (e ErrEndpointNotFound) Error() string { + return "No suitable endpoint could be found in the service catalog." +} + +// ErrInvalidAvailabilityProvided is the error when an invalid endpoint +// availability is provided +type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } + +func (e ErrInvalidAvailabilityProvided) Error() string { + return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) +} + +// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint +// for the given options is found in the v2 catalog +type ErrMultipleMatchingEndpointsV2 struct { + gophercloud.BaseError + Endpoints []tokens2.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV2) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint +// for the given options is found in the v3 catalog +type ErrMultipleMatchingEndpointsV3 struct { + gophercloud.BaseError + Endpoints []tokens3.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV3) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not +// found +type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoAuthURL) Error() string { + return "Environment variable OS_AUTH_URL needs to be set." +} + +// ErrNoUsername is the error when the OS_USERNAME environment variable is not +// found +type ErrNoUsername struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoUsername) Error() string { + return "Environment variable OS_USERNAME needs to be set." 
+}
+
+// ErrNoPassword is the error when the OS_PASSWORD environment variable is not
+// found
+type ErrNoPassword struct{ gophercloud.ErrInvalidInput }
+
+func (e ErrNoPassword) Error() string {
+	return "Environment variable OS_PASSWORD needs to be set."
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go
new file mode 100644
index 00000000000..45623369e18
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go
@@ -0,0 +1,65 @@
+/*
+Package tenants provides information and interaction with the
+tenants API resource for the OpenStack Identity service.
+
+See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants
+for more information.
+
+Example to List Tenants
+
+	listOpts := tenants.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := tenants.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allTenants, err := tenants.ExtractTenants(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, tenant := range allTenants {
+		fmt.Printf("%+v\n", tenant)
+	}
+
+Example to Create a Tenant
+
+	createOpts := tenants.CreateOpts{
+		Name:        "tenant_name",
+		Description: "this is a tenant",
+		Enabled:     gophercloud.Enabled,
+	}
+
+	tenant, err := tenants.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	updateOpts := tenants.UpdateOpts{
+		Description: "this is a new description",
+		Enabled:     gophercloud.Disabled,
+	}
+
+	tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	err := tenants.Delete(identityClient, tenantID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package tenants
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
new file mode 100644
index 00000000000..60f58c8ce39
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
@@ -0,0 +1,116 @@
+package tenants
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts filters the Tenants that are returned by the List call.
+type ListOpts struct {
+	// Marker is the ID of the last Tenant on the previous page.
+	Marker string `q:"marker"`
+
+	// Limit specifies the page size.
+	Limit int `q:"limit"`
+}
+
+// List enumerates the Tenants to which the current token has access.
+func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		q, err := gophercloud.BuildQueryString(opts)
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += q.String()
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return TenantPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOpts represents the options needed when creating a new tenant.
+type CreateOpts struct {
+	// Name is the name of the tenant.
+	Name string `json:"name" required:"true"`
+
+	// Description is the description of the tenant.
+	Description string `json:"description,omitempty"`
+
+	// Enabled sets the tenant status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// CreateOptsBuilder enables extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToTenantCreateMap() (map[string]interface{}, error)
+}
+
+// ToTenantCreateMap assembles a request body based on the contents of
+// a CreateOpts.
+func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "tenant")
+}
+
+// Create is the operation responsible for creating a new tenant.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToTenantCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Get requests details on a single tenant by ID.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToTenantUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// tenant.
+type UpdateOpts struct {
+	// Name is the name of the tenant.
+	Name string `json:"name,omitempty"`
+
+	// Description is the description of the tenant.
+	Description string `json:"description,omitempty"`
+
+	// Enabled sets the tenant status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// ToTenantUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "tenant")
+}
+
+// Update is the operation responsible for updating existing tenants by
+// their TenantID.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToTenantUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete is the operation responsible for permanently deleting a tenant.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
new file mode 100644
index 00000000000..bb6c2c6b08a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
@@ -0,0 +1,91 @@
+package tenants
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Tenant is a grouping of users in the identity service.
+type Tenant struct {
+	// ID is a unique identifier for this tenant.
+	ID string `json:"id"`
+
+	// Name is a friendlier user-facing name for this tenant.
+	Name string `json:"name"`
+
+	// Description is a human-readable explanation of this Tenant's purpose.
+	Description string `json:"description"`
+
+	// Enabled indicates whether or not a tenant is active.
+	Enabled bool `json:"enabled"`
+}
+
+// TenantPage is a single page of Tenant results.
+type TenantPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of Tenants contains any results.
+func (r TenantPage) IsEmpty() (bool, error) {
+	tenants, err := ExtractTenants(r)
+	return len(tenants) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the tenants_links section of the result.
+func (r TenantPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"tenants_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractTenants returns a slice of Tenants contained in a single page of
+// results.
+func ExtractTenants(r pagination.Page) ([]Tenant, error) {
+	var s struct {
+		Tenants []Tenant `json:"tenants"`
+	}
+	err := (r.(TenantPage)).ExtractInto(&s)
+	return s.Tenants, err
+}
+
+type tenantResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any tenantResults as a Tenant.
+func (r tenantResult) Extract() (*Tenant, error) {
+	var s struct {
+		Tenant *Tenant `json:"tenant"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Tenant, err
+}
+
+// GetResult is the response from a Get request. Call its Extract method to
+// interpret it as a Tenant.
+type GetResult struct {
+	tenantResult
+}
+
+// CreateResult is the response from a Create request. Call its Extract method
+// to interpret it as a Tenant.
+type CreateResult struct {
+	tenantResult
+}
+
+// DeleteResult is the response from a Delete request. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult is the response from an Update request. Call its Extract method
+// to interpret it as a Tenant.
+type UpdateResult struct {
+	tenantResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
new file mode 100644
index 00000000000..0f026690790
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
@@ -0,0 +1,23 @@
+package tenants
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tenants")
+}
+
+func getURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tenants")
+}
+
+func deleteURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
+
+func updateURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
new file mode 100644
index 00000000000..5375eea8726
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
@@ -0,0 +1,46 @@
+/*
+Package tokens provides information and interaction with the token API
+resource for the OpenStack Identity service.
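+
+A token can later be validated and inspected; as a sketch (assuming the Get
+and ExtractUser helpers defined in this package, with a placeholder token ID):
+
+	user, err := tokens.Get(identityClient, "d32d4566-3152-4f18-a49f-5f4c2a13e8e8").ExtractUser()
+	if err != nil {
+		panic(err)
+	}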
+
+For more information, see:
+http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+
+Example to Create an Unscoped Token from a Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "pass",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant ID and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "password",
+		TenantID: "fc394f2ab2df4114bde39905f800dc57",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant Name and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username:   "user",
+		Password:   "password",
+		TenantName: "tenantname",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+*/
+package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
new file mode 100644
index 00000000000..ab32368cc6e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
@@ -0,0 +1,103 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// PasswordCredentialsV2 represents the required options to authenticate
+// with a username and password.
+type PasswordCredentialsV2 struct {
+	Username string `json:"username" required:"true"`
+	Password string `json:"password" required:"true"`
+}
+
+// TokenCredentialsV2 represents the required options to authenticate
+// with a token.
+type TokenCredentialsV2 struct {
+	ID string `json:"id,omitempty" required:"true"`
+}
+
+// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the
+// AuthOptionsBuilder interface.
+type AuthOptionsV2 struct {
+	PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"`
+
+	// The TenantID and TenantName fields are optional for the Identity V2 API.
+	// Some providers allow you to specify a TenantName instead of the TenantId.
+	// Some require both. Your provider's authentication policies will determine
+	// how these fields influence authentication.
+	TenantID   string `json:"tenantId,omitempty"`
+	TenantName string `json:"tenantName,omitempty"`
+
+	// TokenCredentials allows users to authenticate (possibly as another user)
+	// with an authentication token ID.
+	TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"`
+}
+
+// AuthOptionsBuilder allows extensions to add additional parameters to the
+// token create request.
+type AuthOptionsBuilder interface {
+	// ToTokenV2CreateMap assembles the Create request body, returning an error
+	// if parameters are missing or inconsistent.
+	ToTokenV2CreateMap() (map[string]interface{}, error)
+}
+
+// AuthOptions are the valid options for Openstack Identity v2 authentication.
+// For field descriptions, see gophercloud.AuthOptions.
+type AuthOptions struct {
+	IdentityEndpoint string `json:"-"`
+	Username         string `json:"username,omitempty"`
+	Password         string `json:"password,omitempty"`
+	TenantID         string `json:"tenantId,omitempty"`
+	TenantName       string `json:"tenantName,omitempty"`
+	AllowReauth      bool   `json:"-"`
+	TokenID          string
+}
+
+// ToTokenV2CreateMap builds a token request body from the given AuthOptions.
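+//
+// For a username/password pair the generated body has roughly the shape
+//
+//	{"auth": {"passwordCredentials": {"username": "...", "password": "..."}}}
+//
+// (plus tenantId/tenantName when set), while a bare TokenID instead yields
+// {"auth": {"token": {"id": "..."}}}.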
+func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + v2Opts := AuthOptionsV2{ + TenantID: opts.TenantID, + TenantName: opts.TenantName, + } + + if opts.Password != "" { + v2Opts.PasswordCredentials = &PasswordCredentialsV2{ + Username: opts.Username, + Password: opts.Password, + } + } else { + v2Opts.TokenCredentials = &TokenCredentialsV2{ + ID: opts.TokenID, + } + } + + b, err := gophercloud.BuildRequestBody(v2Opts, "auth") + if err != nil { + return nil, err + } + return b, nil +} + +// Create authenticates to the identity service and attempts to acquire a Token. +// Generally, rather than interact with this call directly, end users should +// call openstack.AuthenticatedClient(), which abstracts all of the gory details +// about navigating service catalogs and such. +func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { + b, err := auth.ToTokenV2CreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + return +} + +// Get validates and retrieves information for user's token. +func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { + _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go new file mode 100644 index 00000000000..b11326772b1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go @@ -0,0 +1,159 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" +) + +// Token provides only the most basic information related to an authentication +// token. +type Token struct { + // ID provides the primary means of identifying a user to the OpenStack API. + // OpenStack defines this field as an opaque value, so do not depend on its + // content. It is safe, however, to compare for equality. + ID string + + // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the + // authentication token becomes invalid. After this point in time, future + // API requests made using this authentication token will respond with + // errors. Either the caller will need to reauthenticate manually, or more + // preferably, the caller should exploit automatic re-authentication. + // See the AuthOptions structure for more details. + ExpiresAt time.Time + + // Tenant provides information about the tenant to which this token grants + // access. + Tenant tenants.Tenant +} + +// Role is a role for a user. +type Role struct { + Name string `json:"name"` +} + +// User is an OpenStack user. +type User struct { + ID string `json:"id"` + Name string `json:"name"` + UserName string `json:"username"` + Roles []Role `json:"roles"` +} + +// Endpoint represents a single API endpoint offered by a service. +// It provides the public and internal URLs, if supported, along with a region +// specifier, again if provided. +// +// The significance of the Region field will depend upon your provider. 
+// +// In addition, the interface offered by the service will have version +// information associated with it through the VersionId, VersionInfo, and +// VersionList fields, if provided or supported. +// +// In all cases, fields which aren't supported by the provider and service +// combined will assume a zero-value (""). +type Endpoint struct { + TenantID string `json:"tenantId"` + PublicURL string `json:"publicURL"` + InternalURL string `json:"internalURL"` + AdminURL string `json:"adminURL"` + Region string `json:"region"` + VersionID string `json:"versionId"` + VersionInfo string `json:"versionInfo"` + VersionList string `json:"versionList"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V2 service +// catalog listing. +// +// Each class of service, such as cloud DNS or block storage services, will have +// a single CatalogEntry representing it. +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may assign + // their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry +} + +// CreateResult is the response from a Create request. Use ExtractToken() to +// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a +// service catalog. +type CreateResult struct { + gophercloud.Result +} + +// GetResult is the deferred response from a Get call, which is the same with a +// Created token. Use ExtractUser() to interpret it as a User. +type GetResult struct { + CreateResult +} + +// ExtractToken returns the just-created Token from a CreateResult. +func (r CreateResult) ExtractToken() (*Token, error) { + var s struct { + Access struct { + Token struct { + Expires string `json:"expires"` + ID string `json:"id"` + Tenant tenants.Tenant `json:"tenant"` + } `json:"token"` + } `json:"access"` + } + + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) + if err != nil { + return nil, err + } + + return &Token{ + ID: s.Access.Token.ID, + ExpiresAt: expiresTs, + Tenant: s.Access.Token.Tenant, + }, nil +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s struct { + Access struct { + Entries []CatalogEntry `json:"serviceCatalog"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &ServiceCatalog{Entries: s.Access.Entries}, err +} + +// ExtractUser returns the User from a GetResult. 
+func (r GetResult) ExtractUser() (*User, error) {
+	var s struct {
+		Access struct {
+			User User `json:"user"`
+		} `json:"access"`
+	}
+	err := r.ExtractInto(&s)
+	return &s.Access.User, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go
new file mode 100644
index 00000000000..ee0a28f2004
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go
@@ -0,0 +1,13 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// CreateURL generates the URL used to create new Tokens.
+func CreateURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tokens")
+}
+
+// GetURL generates the URL used to Validate Tokens.
+func GetURL(client *gophercloud.ServiceClient, token string) string {
+	return client.ServiceURL("tokens", token)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go
new file mode 100644
index 00000000000..696e2a5d8e5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go
@@ -0,0 +1,60 @@
+/*
+Package groups manages and retrieves Groups in the OpenStack Identity Service.
+
+Example to List Groups
+
+	listOpts := groups.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := groups.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allGroups, err := groups.ExtractGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, group := range allGroups {
+		fmt.Printf("%+v\n", group)
+	}
+
+Example to Create a Group
+
+	createOpts := groups.CreateOpts{
+		Name:     "groupname",
+		DomainID: "default",
+		Extra: map[string]interface{}{
+			"email": "groupname@example.com",
+		},
+	}
+
+	group, err := groups.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Group
+
+	groupID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	updateOpts := groups.UpdateOpts{
+		Description: "Updated Description for group",
+	}
+
+	group, err := groups.Update(identityClient, groupID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Group
+
+	groupID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := groups.Delete(identityClient, groupID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package groups
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go
new file mode 100644
index 00000000000..b6e74dcf976
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go
@@ -0,0 +1,158 @@
+package groups
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request.
+type ListOptsBuilder interface {
+	ToGroupListQuery() (string, error)
+}
+
+// ListOpts provides options to filter the List results.
+type ListOpts struct {
+	// DomainID filters the response by a domain ID.
+	DomainID string `q:"domain_id"`
+
+	// Name filters the response by group name.
+	Name string `q:"name"`
+}
+
+// ToGroupListQuery formats a ListOpts into a query string.
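+//
+// For example, ListOpts{DomainID: "default"} should yield a query string of
+// the form "?domain_id=default", per the q struct tags above.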
+func (opts ListOpts) ToGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates the Groups to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToGroupListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return GroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single group, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a group. +type CreateOpts struct { + // Name is the name of the new group. + Name string `json:"name" required:"true"` + + // Description is a description of the group. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the group belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the group. + Extra map[string]interface{} `json:"-"` +} + +// ToGroupCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToGroupCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "group") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["group"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a group. +type UpdateOpts struct { + // Name is the name of the new group. + Name string `json:"name,omitempty"` + + // Description is a description of the group. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the group belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the group. + Extra map[string]interface{} `json:"-"` +} + +// ToGroupUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToGroupUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "group") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["group"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Group. 
+func Update(client *gophercloud.ServiceClient, groupID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToGroupUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, groupID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a group. +func Delete(client *gophercloud.ServiceClient, groupID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, groupID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go new file mode 100644 index 00000000000..ba7d018d173 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go @@ -0,0 +1,132 @@ +package groups + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Group helps manage related users. +type Group struct { + // Description describes the group purpose. + Description string `json:"description"` + + // DomainID is the domain ID the group belongs to. + DomainID string `json:"domain_id"` + + // ID is the unique ID of the group. + ID string `json:"id"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` + + // Links contains referencing links to the group. + Links map[string]interface{} `json:"links"` + + // Name is the name of the group. + Name string `json:"name"` +} + +func (r *Group) UnmarshalJSON(b []byte) error { + type tmp Group + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Group(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Group{}, resultMap) + } + } + + return err +} + +type groupResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Group. +type GetResult struct { + groupResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Group. +type CreateResult struct { + groupResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Group. +type UpdateResult struct { + groupResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GroupPage is a single page of Group results. +type GroupPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Groups contains any results. +func (r GroupPage) IsEmpty() (bool, error) { + groups, err := ExtractGroups(r) + return len(groups) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. 
+func (r GroupPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractGroups returns a slice of Groups contained in a single page of results. +func ExtractGroups(r pagination.Page) ([]Group, error) { + var s struct { + Groups []Group `json:"groups"` + } + err := (r.(GroupPage)).ExtractInto(&s) + return s.Groups, err +} + +// Extract interprets any group results as a Group. +func (r groupResult) Extract() (*Group, error) { + var s struct { + Group *Group `json:"group"` + } + err := r.ExtractInto(&s) + return s.Group, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go new file mode 100644 index 00000000000..e7d1e53b27f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go @@ -0,0 +1,23 @@ +package groups + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("groups") +} + +func getURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("groups") +} + +func updateURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} + +func deleteURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go new file mode 100644 index 00000000000..4f5b45ab655 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go @@ -0,0 +1,58 @@ +/* +Package projects manages and retrieves Projects in the OpenStack Identity +Service. 
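+
+Example to Get a Project (a sketch; assumes the usual Extract helper on the
+result and a placeholder project ID)
+
+	projectID := "966b3c7d36a24facaf20b7e458bf2192"
+
+	project, err := projects.Get(identityClient, projectID).Extract()
+	if err != nil {
+		panic(err)
+	}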
+
+Example to List Projects
+
+	listOpts := projects.ListOpts{
+		Enabled: gophercloud.Enabled,
+	}
+
+	allPages, err := projects.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allProjects, err := projects.ExtractProjects(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, project := range allProjects {
+		fmt.Printf("%+v\n", project)
+	}
+
+Example to Create a Project
+
+	createOpts := projects.CreateOpts{
+		Name:        "project_name",
+		Description: "Project Description",
+	}
+
+	project, err := projects.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Project
+
+	projectID := "966b3c7d36a24facaf20b7e458bf2192"
+
+	updateOpts := projects.UpdateOpts{
+		Enabled: gophercloud.Disabled,
+	}
+
+	project, err := projects.Update(identityClient, projectID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Project
+
+	projectID := "966b3c7d36a24facaf20b7e458bf2192"
+	err := projects.Delete(identityClient, projectID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package projects
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go
new file mode 100644
index 00000000000..368b7321ba7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go
@@ -0,0 +1,152 @@
+package projects
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request.
+type ListOptsBuilder interface {
+	ToProjectListQuery() (string, error)
+}
+
+// ListOpts enables filtering of a list request.
+type ListOpts struct {
+	// DomainID filters the response by a domain ID.
+	DomainID string `q:"domain_id"`
+
+	// Enabled filters the response by enabled projects.
+	Enabled *bool `q:"enabled"`
+
+	// IsDomain filters the response by projects that are domains.
+	// Setting this to true is effectively listing domains.
+	IsDomain *bool `q:"is_domain"`
+
+	// Name filters the response by project name.
+	Name string `q:"name"`
+
+	// ParentID filters the response by projects of a given parent project.
+	ParentID string `q:"parent_id"`
+}
+
+// ToProjectListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToProjectListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List enumerates the Projects to which the current token has access.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToProjectListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return ProjectPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves details on a single project, by ID.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type CreateOptsBuilder interface {
+	ToProjectCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents parameters used to create a project.
+type CreateOpts struct {
+	// DomainID is the ID this project will belong under.
+	DomainID string `json:"domain_id,omitempty"`
+
+	// Enabled sets the project status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// IsDomain indicates if this project is a domain.
+	IsDomain *bool `json:"is_domain,omitempty"`
+
+	// Name is the name of the project.
+	Name string `json:"name" required:"true"`
+
+	// ParentID specifies the parent project of this new project.
+	ParentID string `json:"parent_id,omitempty"`
+
+	// Description is the description of the project.
+	Description string `json:"description,omitempty"`
+}
+
+// ToProjectCreateMap formats a CreateOpts into a create request.
+func (opts CreateOpts) ToProjectCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "project")
+}
+
+// Create creates a new Project.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToProjectCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), &b, &r.Body, nil)
+	return
+}
+
+// Delete deletes a project.
+func Delete(client *gophercloud.ServiceClient, projectID string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, projectID), nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateOptsBuilder interface {
+	ToProjectUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents parameters to update a project.
+type UpdateOpts struct {
+	// DomainID is the ID this project will belong under.
+	DomainID string `json:"domain_id,omitempty"`
+
+	// Enabled sets the project status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// IsDomain indicates if this project is a domain.
+	IsDomain *bool `json:"is_domain,omitempty"`
+
+	// Name is the name of the project.
+	Name string `json:"name,omitempty"`
+
+	// ParentID specifies the parent project of this project.
+	ParentID string `json:"parent_id,omitempty"`
+
+	// Description is the description of the project.
+	Description string `json:"description,omitempty"`
+}
+
+// ToProjectUpdateMap formats a UpdateOpts into an update request.
+func (opts UpdateOpts) ToProjectUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "project")
+}
+
+// Update modifies the attributes of a project.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToProjectUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go
new file mode 100644
index 00000000000..a13fa7f2ae1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go
@@ -0,0 +1,103 @@
+package projects
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type projectResult struct {
+	gophercloud.Result
+}
+
+// GetResult is the result of a Get request. Call its Extract method to
+// interpret it as a Project.
+type GetResult struct {
+	projectResult
+}
+
+// CreateResult is the result of a Create request.
Call its Extract method to +// interpret it as a Project. +type CreateResult struct { + projectResult +} + +// DeleteResult is the result of a Delete request. Call its ExtractErr method to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the result of an Update request. Call its Extract method to +// interpret it as a Project. +type UpdateResult struct { + projectResult +} + +// Project represents an OpenStack Identity Project. +type Project struct { + // IsDomain indicates whether the project is a domain. + IsDomain bool `json:"is_domain"` + + // Description is the description of the project. + Description string `json:"description"` + + // DomainID is the domain ID the project belongs to. + DomainID string `json:"domain_id"` + + // Enabled is whether or not the project is enabled. + Enabled bool `json:"enabled"` + + // ID is the unique ID of the project. + ID string `json:"id"` + + // Name is the name of the project. + Name string `json:"name"` + + // ParentID is the parent_id of the project. + ParentID string `json:"parent_id"` +} + +// ProjectPage is a single page of Project results. +type ProjectPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Projects contains any results. +func (r ProjectPage) IsEmpty() (bool, error) { + projects, err := ExtractProjects(r) + return len(projects) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r ProjectPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractProjects returns a slice of Projects contained in a single page of +// results. +func ExtractProjects(r pagination.Page) ([]Project, error) { + var s struct { + Projects []Project `json:"projects"` + } + err := (r.(ProjectPage)).ExtractInto(&s) + return s.Projects, err +} + +// Extract interprets any projectResults as a Project. 
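The Get/Extract pairing below follows the same pattern as the other identity packages; a minimal sketch, assuming identityClient is an initialized identity v3 client (the project ID is hypothetical):

	project, err := projects.Get(identityClient, "966b3c7d36a24facaf20b7e458bf2192").Extract()
	if err != nil {
		panic(err)
	}

	fmt.Printf("%s enabled: %t\n", project.Name, project.Enabled)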
+func (r projectResult) Extract() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go new file mode 100644 index 00000000000..e26cf3684cc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go @@ -0,0 +1,23 @@ +package projects + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("projects") +} + +func getURL(client *gophercloud.ServiceClient, projectID string) string { + return client.ServiceURL("projects", projectID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("projects") +} + +func deleteURL(client *gophercloud.ServiceClient, projectID string) string { + return client.ServiceURL("projects", projectID) +} + +func updateURL(client *gophercloud.ServiceClient, projectID string) string { + return client.ServiceURL("projects", projectID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go new file mode 100644 index 00000000000..966e128f128 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go @@ -0,0 +1,108 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. + +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 + +Example to Create a Token From a Username and Password + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Username, Password, and Domain + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainID: "default", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + + authOptions = tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainName: "default", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Token + + authOptions := tokens.AuthOptions{ + TokenID: "token_id", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project ID Scope + + scope := tokens.Scope{ + ProjectID: "0fe36e73809d46aeae6705c39077b1b3", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Domain ID Scope + + scope := tokens.Scope{ + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project Name Scope + + scope := 
tokens.Scope{
+		ProjectName: "project_name",
+		DomainID:    "default",
+	}
+
+	authOptions := tokens.AuthOptions{
+		Scope:    &scope,
+		UserID:   "username",
+		Password: "password",
+	}
+
+	token, err = tokens.Create(identityClient, authOptions).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
new file mode 100644
index 00000000000..ca35851e4a4
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
@@ -0,0 +1,210 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// Scope allows a created token to be limited to a specific domain or project.
+type Scope struct {
+	ProjectID   string
+	ProjectName string
+	DomainID    string
+	DomainName  string
+}
+
+// AuthOptionsBuilder provides the ability for extensions to add additional
+// parameters to AuthOptions. Extensions must satisfy all required methods.
+type AuthOptionsBuilder interface {
+	// ToTokenV3CreateMap assembles the Create request body, returning an error
+	// if parameters are missing or inconsistent.
+	ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error)
+	ToTokenV3ScopeMap() (map[string]interface{}, error)
+	CanReauth() bool
+}
+
+// AuthOptions represents options for authenticating a user.
+type AuthOptions struct {
+	// IdentityEndpoint specifies the HTTP endpoint that is required to work with
+	// the Identity API of the appropriate version. While it's ultimately needed
+	// by all of the identity services, it will often be populated by a
+	// provider-level function.
+	IdentityEndpoint string `json:"-"`
+
+	// Username is required if using Identity V2 API. Consult with your provider's
+	// control panel to discover your account's username. In Identity V3, either
+	// UserID or a combination of Username and DomainID or DomainName are needed.
+	Username string `json:"username,omitempty"`
+	UserID   string `json:"id,omitempty"`
+
+	Password string `json:"password,omitempty"`
+
+	// At most one of DomainID and DomainName must be provided if using Username
+	// with Identity V3. Otherwise, both are optional.
+	DomainID   string `json:"-"`
+	DomainName string `json:"name,omitempty"`
+
+	// AllowReauth should be set to true if you grant permission for Gophercloud
+	// to cache your credentials in memory, and to allow Gophercloud to attempt
+	// to re-authenticate automatically if/when your token expires. If you set
+	// it to false, it will not cache these settings, but re-authentication will
+	// not be possible. This setting defaults to false.
+	AllowReauth bool `json:"-"`
+
+	// TokenID allows users to authenticate (possibly as another user) with an
+	// authentication token ID.
+	TokenID string `json:"-"`
+
+	Scope Scope `json:"-"`
+}
+
+// ToTokenV3CreateMap builds a request body from AuthOptions.
+func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) {
+	gophercloudAuthOpts := gophercloud.AuthOptions{
+		Username:    opts.Username,
+		UserID:      opts.UserID,
+		Password:    opts.Password,
+		DomainID:    opts.DomainID,
+		DomainName:  opts.DomainName,
+		AllowReauth: opts.AllowReauth,
+		TokenID:     opts.TokenID,
+	}
+
+	return gophercloudAuthOpts.ToTokenV3CreateMap(scope)
+}
+
+// ToTokenV3ScopeMap builds a scope request body from AuthOptions.
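For reference, a project scope expressed by name serializes to a nested "project"/"domain" map; a sketch derived from the branches below (the values are hypothetical):

	opts := tokens.AuthOptions{
		UserID:   "user_id",
		Password: "secret",
		Scope: tokens.Scope{
			ProjectName: "project_name",
			DomainName:  "Default",
		},
	}

	scope, err := opts.ToTokenV3ScopeMap()
	if err != nil {
		panic(err)
	}

	// scope is equivalent to:
	// map[string]interface{}{
	// 	"project": map[string]interface{}{
	// 		"name":   &opts.Scope.ProjectName,
	// 		"domain": map[string]interface{}{"name": &opts.Scope.DomainName},
	// 	},
	// }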
+func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
+	if opts.Scope.ProjectName != "" {
+		// ProjectName provided: either DomainID or DomainName must also be supplied.
+		// ProjectID may not be supplied.
+		if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" {
+			return nil, gophercloud.ErrScopeDomainIDOrDomainName{}
+		}
+		if opts.Scope.ProjectID != "" {
+			return nil, gophercloud.ErrScopeProjectIDOrProjectName{}
+		}
+
+		if opts.Scope.DomainID != "" {
+			// ProjectName + DomainID
+			return map[string]interface{}{
+				"project": map[string]interface{}{
+					"name":   &opts.Scope.ProjectName,
+					"domain": map[string]interface{}{"id": &opts.Scope.DomainID},
+				},
+			}, nil
+		}
+
+		if opts.Scope.DomainName != "" {
+			// ProjectName + DomainName
+			return map[string]interface{}{
+				"project": map[string]interface{}{
+					"name":   &opts.Scope.ProjectName,
+					"domain": map[string]interface{}{"name": &opts.Scope.DomainName},
+				},
+			}, nil
+		}
+	} else if opts.Scope.ProjectID != "" {
+		// ProjectID provided. ProjectName, DomainID, and DomainName may not be provided.
+		if opts.Scope.DomainID != "" {
+			return nil, gophercloud.ErrScopeProjectIDAlone{}
+		}
+		if opts.Scope.DomainName != "" {
+			return nil, gophercloud.ErrScopeProjectIDAlone{}
+		}
+
+		// ProjectID
+		return map[string]interface{}{
+			"project": map[string]interface{}{
+				"id": &opts.Scope.ProjectID,
+			},
+		}, nil
+	} else if opts.Scope.DomainID != "" {
+		// DomainID provided. ProjectID, ProjectName, and DomainName may not be provided.
+		if opts.Scope.DomainName != "" {
+			return nil, gophercloud.ErrScopeDomainIDOrDomainName{}
+		}
+
+		// DomainID
+		return map[string]interface{}{
+			"domain": map[string]interface{}{
+				"id": &opts.Scope.DomainID,
+			},
+		}, nil
+	} else if opts.Scope.DomainName != "" {
+		// DomainName
+		return map[string]interface{}{
+			"domain": map[string]interface{}{
+				"name": &opts.Scope.DomainName,
+			},
+		}, nil
+	}
+
+	return nil, nil
+}
+
+func (opts *AuthOptions) CanReauth() bool {
+	return opts.AllowReauth
+}
+
+func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string {
+	return map[string]string{
+		"X-Subject-Token": subjectToken,
+	}
+}
+
+// Create authenticates and either generates a new token, or changes the Scope
+// of an existing token.
+func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) {
+	scope, err := opts.ToTokenV3ScopeMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	b, err := opts.ToTokenV3CreateMap(scope)
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{
+		MoreHeaders: map[string]string{"X-Auth-Token": ""},
+	})
+	r.Err = err
+	if resp != nil {
+		r.Header = resp.Header
+	}
+	return
+}
+
+// Get validates and retrieves information about another token.
+func Get(c *gophercloud.ServiceClient, token string) (r GetResult) {
+	resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{
+		MoreHeaders: subjectTokenHeaders(c, token),
+		OkCodes:     []int{200, 203},
+	})
+	// Record the error unconditionally; the previous version only set r.Err
+	// when a response was returned, silently dropping transport-level failures.
+	r.Err = err
+	if resp != nil {
+		r.Header = resp.Header
+	}
+	return
+}
+
+// Validate determines if a specified token is valid or not.
+func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { + resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 204, 404}, + }) + if err != nil { + return false, err + } + + return resp.StatusCode == 200 || resp.StatusCode == 204, nil +} + +// Revoke immediately makes specified token invalid. +func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { + _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go new file mode 100644 index 00000000000..6e78d1cbdbf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go @@ -0,0 +1,170 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" +) + +// Endpoint represents a single API endpoint offered by a service. +// It matches either a public, internal or admin URL. +// If supported, it contains a region specifier, again if provided. +// The significance of the Region field will depend upon your provider. +type Endpoint struct { + ID string `json:"id"` + Region string `json:"region"` + Interface string `json:"interface"` + URL string `json:"url"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V3 service +// catalog listing. Each class of service, such as cloud DNS or block storage +// services, could have multiple CatalogEntry representing it (one by interface +// type, e.g public, admin or internal). +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Service ID + ID string `json:"id"` + + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may + // assign their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry `json:"catalog"` +} + +// Domain provides information about the domain to which this token grants +// access. +type Domain struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// User represents a user resource that exists in the Identity Service. +type User struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// Role provides information about roles to which User is authorized. +type Role struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// Project provides information about project to which User is authorized. +type Project struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// commonResult is the response from a request. A commonResult has various +// methods which can be used to extract different details about the result. 
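The commonResult defined next exposes several extractors over the same response body; a sketch of pulling both the token and the catalog out of one Create call, assuming identityClient is an initialized identity v3 client (note the pointer, since AuthOptions satisfies AuthOptionsBuilder via pointer receivers):

	result := tokens.Create(identityClient, &tokens.AuthOptions{
		UserID:   "user_id",
		Password: "secret",
	})

	token, err := result.ExtractToken()
	if err != nil {
		panic(err)
	}

	catalog, err := result.ExtractServiceCatalog()
	if err != nil {
		panic(err)
	}

	fmt.Printf("token %s expires at %s\n", token.ID, token.ExpiresAt)
	for _, entry := range catalog.Entries {
		fmt.Printf("service %s (%s)\n", entry.Name, entry.Type)
	}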
+type commonResult struct { + gophercloud.Result +} + +// Extract is a shortcut for ExtractToken. +// This function is deprecated and still present for backward compatibility. +func (r commonResult) Extract() (*Token, error) { + return r.ExtractToken() +} + +// ExtractToken interprets a commonResult as a Token. +func (r commonResult) ExtractToken() (*Token, error) { + var s Token + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + // Parse the token itself from the stored headers. + s.ID = r.Header.Get("X-Subject-Token") + + return &s, err +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s ServiceCatalog + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractUser returns the User that is the owner of the Token. +func (r commonResult) ExtractUser() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// ExtractRoles returns Roles to which User is authorized. +func (r commonResult) ExtractRoles() ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := r.ExtractInto(&s) + return s.Roles, err +} + +// ExtractProject returns Project to which User is authorized. +func (r commonResult) ExtractProject() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} + +// CreateResult is the response from a Create request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type CreateResult struct { + commonResult +} + +// GetResult is the response from a Get request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type GetResult struct { + commonResult +} + +// RevokeResult is response from a Revoke request. +type RevokeResult struct { + commonResult +} + +// Token is a string that grants a user access to a controlled set of services +// in an OpenStack provider. Each Token is valid for a set length of time. +type Token struct { + // ID is the issued token. + ID string `json:"id"` + + // ExpiresAt is the timestamp at which this token will no longer be accepted. + ExpiresAt time.Time `json:"expires_at"` +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.ExtractIntoStructPtr(v, "token") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go new file mode 100644 index 00000000000..2f864a31c8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go @@ -0,0 +1,7 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +func tokenURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("auth", "tokens") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go new file mode 100644 index 00000000000..aa7ec196f5a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go @@ -0,0 +1,123 @@ +/* +Package users manages and retrieves Users in the OpenStack Identity Service. 
+
+Example to List Users
+
+	listOpts := users.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := users.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allUsers, err := users.ExtractUsers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, user := range allUsers {
+		fmt.Printf("%+v\n", user)
+	}
+
+Example to Create a User
+
+	projectID := "a99e9b4e620e4db09a2dfb6e42a01e66"
+
+	createOpts := users.CreateOpts{
+		Name:             "username",
+		DomainID:         "default",
+		DefaultProjectID: projectID,
+		Enabled:          gophercloud.Enabled,
+		Password:         "supersecret",
+		Extra: map[string]interface{}{
+			"email": "username@example.com",
+		},
+	}
+
+	user, err := users.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a User
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	updateOpts := users.UpdateOpts{
+		Enabled: gophercloud.Disabled,
+	}
+
+	user, err := users.Update(identityClient, userID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a User
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := users.Delete(identityClient, userID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Groups a User Belongs To
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	allPages, err := users.ListGroups(identityClient, userID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allGroups, err := groups.ExtractGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, group := range allGroups {
+		fmt.Printf("%+v\n", group)
+	}
+
+Example to List Projects a User Belongs To
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	allPages, err := users.ListProjects(identityClient, userID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allProjects, err := projects.ExtractProjects(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, project := range allProjects {
+		fmt.Printf("%+v\n", project)
+	}
+
+Example to List Users in a Group
+
+	groupID := "bede500ee1124ae9b0006ff859758b3a"
+	listOpts := users.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := users.ListInGroup(identityClient, groupID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allUsers, err := users.ExtractUsers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, user := range allUsers {
+		fmt.Printf("%+v\n", user)
+	}
+
+*/
+package users
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go
new file mode 100644
index 00000000000..779d116fccd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go
@@ -0,0 +1,242 @@
+package users
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/identity/v3/groups"
+	"github.com/gophercloud/gophercloud/openstack/identity/v3/projects"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Option is a specific option defined at the API to enable features
+// on a user account.
+type Option string + +const ( + IgnoreChangePasswordUponFirstUse Option = "ignore_change_password_upon_first_use" + IgnorePasswordExpiry Option = "ignore_password_expiry" + IgnoreLockoutFailureAttempts Option = "ignore_lockout_failure_attempts" + MultiFactorAuthRules Option = "multi_factor_auth_rules" + MultiFactorAuthEnabled Option = "multi_factor_auth_enabled" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToUserListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // DomainID filters the response by a domain ID. + DomainID string `q:"domain_id"` + + // Enabled filters the response by enabled users. + Enabled *bool `q:"enabled"` + + // IdpID filters the response by an Identity Provider ID. + IdPID string `q:"idp_id"` + + // Name filters the response by username. + Name string `q:"name"` + + // PasswordExpiresAt filters the response based on expiring passwords. + PasswordExpiresAt string `q:"password_expires_at"` + + // ProtocolID filters the response by protocol ID. + ProtocolID string `q:"protocol_id"` + + // UniqueID filters the response by unique ID. + UniqueID string `q:"unique_id"` +} + +// ToUserListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToUserListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates the Users to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToUserListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single user, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a user. +type CreateOpts struct { + // Name is the name of the new user. + Name string `json:"name" required:"true"` + + // DefaultProjectID is the ID of the default project of the user. + DefaultProjectID string `json:"default_project_id,omitempty"` + + // Description is a description of the user. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the user belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the user status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the user. + Extra map[string]interface{} `json:"-"` + + // Options are defined options in the API to enable certain features. + Options map[Option]interface{} `json:"options,omitempty"` + + // Password is the password of the new user. + Password string `json:"password,omitempty"` +} + +// ToUserCreateMap formats a CreateOpts into a create request. 
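Note how ToUserCreateMap (below) folds the json:"-" Extra keys into the "user" object by hand after BuildRequestBody runs; a sketch of the resulting body (the values are hypothetical):

	createOpts := users.CreateOpts{
		Name:     "username",
		DomainID: "default",
		Extra: map[string]interface{}{
			"email": "username@example.com",
		},
	}

	b, err := createOpts.ToUserCreateMap()
	if err != nil {
		panic(err)
	}

	// b is equivalent to:
	// map[string]interface{}{
	// 	"user": map[string]interface{}{
	// 		"name":      "username",
	// 		"domain_id": "default",
	// 		"email":     "username@example.com",
	// 	},
	// }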
+func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "user")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Extra != nil {
+		if v, ok := b["user"].(map[string]interface{}); ok {
+			for key, value := range opts.Extra {
+				v[key] = value
+			}
+		}
+	}
+
+	return b, nil
+}
+
+// Create creates a new User.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToUserCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateOptsBuilder interface {
+	ToUserUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts provides options for updating a user account.
+type UpdateOpts struct {
+	// Name is the new name of the user.
+	Name string `json:"name,omitempty"`
+
+	// DefaultProjectID is the ID of the default project of the user.
+	DefaultProjectID string `json:"default_project_id,omitempty"`
+
+	// Description is a description of the user.
+	Description string `json:"description,omitempty"`
+
+	// DomainID is the ID of the domain the user belongs to.
+	DomainID string `json:"domain_id,omitempty"`
+
+	// Enabled sets the user status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Extra is free-form extra key/value pairs to describe the user.
+	Extra map[string]interface{} `json:"-"`
+
+	// Options are defined options in the API to enable certain features.
+	Options map[Option]interface{} `json:"options,omitempty"`
+
+	// Password is the new password of the user.
+	Password string `json:"password,omitempty"`
+}
+
+// ToUserUpdateMap formats a UpdateOpts into an update request.
+func (opts UpdateOpts) ToUserUpdateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "user")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Extra != nil {
+		if v, ok := b["user"].(map[string]interface{}); ok {
+			for key, value := range opts.Extra {
+				v[key] = value
+			}
+		}
+	}
+
+	return b, nil
+}
+
+// Update updates an existing User.
+func Update(client *gophercloud.ServiceClient, userID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToUserUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Patch(updateURL(client, userID), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete deletes a user.
+func Delete(client *gophercloud.ServiceClient, userID string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, userID), nil)
+	return
+}
+
+// ListGroups enumerates the groups a user belongs to.
+func ListGroups(client *gophercloud.ServiceClient, userID string) pagination.Pager {
+	url := listGroupsURL(client, userID)
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return groups.GroupPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// ListProjects enumerates the projects a user belongs to.
+func ListProjects(client *gophercloud.ServiceClient, userID string) pagination.Pager {
+	url := listProjectsURL(client, userID)
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return projects.ProjectPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// ListInGroup enumerates users that belong to a group.
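Besides the AllPages flow shown in doc.go, the pager returned here also supports streaming with EachPage, and opts may be nil; a minimal sketch, assuming identityClient is an initialized identity v3 client and the pagination package is imported:

	groupID := "bede500ee1124ae9b0006ff859758b3a"

	err := users.ListInGroup(identityClient, groupID, nil).EachPage(func(page pagination.Page) (bool, error) {
		pageUsers, err := users.ExtractUsers(page)
		if err != nil {
			return false, err
		}
		for _, user := range pageUsers {
			fmt.Println(user.Name)
		}
		return true, nil
	})
	if err != nil {
		panic(err)
	}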
+func ListInGroup(client *gophercloud.ServiceClient, groupID string, opts ListOptsBuilder) pagination.Pager { + url := listInGroupURL(client, groupID) + if opts != nil { + query, err := opts.ToUserListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go new file mode 100644 index 00000000000..c474e882b90 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go @@ -0,0 +1,149 @@ +package users + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// User represents a User in the OpenStack Identity Service. +type User struct { + // DefaultProjectID is the ID of the default project of the user. + DefaultProjectID string `json:"default_project_id"` + + // Description is the description of the user. + Description string `json:"description"` + + // DomainID is the domain ID the user belongs to. + DomainID string `json:"domain_id"` + + // Enabled is whether or not the user is enabled. + Enabled bool `json:"enabled"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` + + // ID is the unique ID of the user. + ID string `json:"id"` + + // Links contains referencing links to the user. + Links map[string]interface{} `json:"links"` + + // Name is the name of the user. + Name string `json:"name"` + + // Options are a set of defined options of the user. + Options map[string]interface{} `json:"options"` + + // PasswordExpiresAt is the timestamp when the user's password expires. + PasswordExpiresAt time.Time `json:"-"` +} + +func (r *User) UnmarshalJSON(b []byte) error { + type tmp User + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + PasswordExpiresAt gophercloud.JSONRFC3339MilliNoZ `json:"password_expires_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = User(s.tmp) + + r.PasswordExpiresAt = time.Time(s.PasswordExpiresAt) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + delete(resultMap, "password_expires_at") + r.Extra = internal.RemainingKeys(User{}, resultMap) + } + } + + return err +} + +type userResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a User. +type GetResult struct { + userResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a User. +type CreateResult struct { + userResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a User. +type UpdateResult struct { + userResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} + +// UserPage is a single page of User results. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a UserPage contains any results. +func (r UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(r) + return len(users) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r UserPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractUsers returns a slice of Users contained in a single page of results. +func ExtractUsers(r pagination.Page) ([]User, error) { + var s struct { + Users []User `json:"users"` + } + err := (r.(UserPage)).ExtractInto(&s) + return s.Users, err +} + +// Extract interprets any user results as a User. +func (r userResult) Extract() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go new file mode 100644 index 00000000000..1db2831b5ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go @@ -0,0 +1,35 @@ +package users + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("users") +} + +func getURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("users") +} + +func updateURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func deleteURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func listGroupsURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "groups") +} + +func listProjectsURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "projects") +} + +func listInGroupURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID, "users") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go new file mode 100644 index 00000000000..a2f5e58b89a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go @@ -0,0 +1,33 @@ +/* +Package imagedata enables management of image data. 
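In addition to the examples that follow, the reader returned by Download can be streamed straight to disk rather than buffered with ioutil.ReadAll; a sketch, assuming imageClient is an initialized image service client and the output path is hypothetical:

	image, err := imagedata.Download(imageClient, "da3b75d9-3f4a-40e7-8a2c-bfab23927dea").Extract()
	if err != nil {
		panic(err)
	}

	out, err := os.Create("/tmp/image.raw")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, image); err != nil {
		panic(err)
	}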
+
+Example to Upload Image Data
+
+	imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea"
+
+	imageData, err := os.Open("/path/to/image/file")
+	if err != nil {
+		panic(err)
+	}
+	defer imageData.Close()
+
+	err = imagedata.Upload(imageClient, imageID, imageData).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Download Image Data
+
+	imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea"
+
+	image, err := imagedata.Download(imageClient, imageID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	imageData, err := ioutil.ReadAll(image)
+	if err != nil {
+		panic(err)
+	}
+*/
+package imagedata
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go
new file mode 100644
index 00000000000..4761e488c6b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go
@@ -0,0 +1,28 @@
+package imagedata
+
+import (
+	"io"
+	"net/http"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// Upload uploads an image file.
+func Upload(client *gophercloud.ServiceClient, id string, data io.Reader) (r UploadResult) {
+	_, r.Err = client.Put(uploadURL(client, id), data, nil, &gophercloud.RequestOpts{
+		MoreHeaders: map[string]string{"Content-Type": "application/octet-stream"},
+		OkCodes:     []int{204},
+	})
+	return
+}
+
+// Download retrieves an image.
+func Download(client *gophercloud.ServiceClient, id string) (r DownloadResult) {
+	var resp *http.Response
+	resp, r.Err = client.Get(downloadURL(client, id), nil, nil)
+	if resp != nil {
+		r.Body = resp.Body
+		r.Header = resp.Header
+	}
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go
new file mode 100644
index 00000000000..895d28ba8ae
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go
@@ -0,0 +1,28 @@
+package imagedata
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// UploadResult is the result of an upload image operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type UploadResult struct {
+	gophercloud.ErrResult
+}
+
+// DownloadResult is the result of a download image operation. Call its Extract
+// method to gain access to the image data.
+type DownloadResult struct {
+	gophercloud.Result
+}
+
+// Extract returns the downloaded image data as an io.Reader.
+func (r DownloadResult) Extract() (io.Reader, error) {
+	if r, ok := r.Body.(io.Reader); ok {
+		return r, nil
+	}
+	return nil, fmt.Errorf("Expected io.Reader but got: %T(%#v)", r.Body, r.Body)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go
new file mode 100644
index 00000000000..ccd6416e53e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go
@@ -0,0 +1,13 @@
+package imagedata
+
+import "github.com/gophercloud/gophercloud"
+
+// `uploadURL(c,i)` is the URL for the binary image data for the
+// image identified by ID `i` in the service `c`.
+func uploadURL(c *gophercloud.ServiceClient, imageID string) string {
+	return c.ServiceURL("images", imageID, "file")
+}
+
+func downloadURL(c *gophercloud.ServiceClient, imageID string) string {
+	return uploadURL(c, imageID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
new file mode 100644
index 00000000000..14da9ac90da
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
@@ -0,0 +1,60 @@
+/*
+Package images enables management and retrieval of images from the OpenStack
+Image Service.
+
+Example to List Images
+
+	listOpts := images.ListOpts{
+		Owner: "a7509e1ae65945fda83f3e52c6296017",
+	}
+
+	allPages, err := images.List(imagesClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range allImages {
+		fmt.Printf("%+v\n", image)
+	}
+
+Example to Create an Image
+
+	createOpts := images.CreateOpts{
+		Name:       "image_name",
+		Visibility: images.ImageVisibilityPrivate,
+	}
+
+	image, err := images.Create(imageClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+
+	updateOpts := images.UpdateOpts{
+		images.ReplaceImageName{
+			NewName: "new_name",
+		},
+	}
+
+	image, err := images.Update(imageClient, imageID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+	err := images.Delete(imageClient, imageID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package images
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
new file mode 100644
index 00000000000..387e791cebc
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
@@ -0,0 +1,258 @@
+package images
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToImageListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the server attributes you want to see returned. Marker and Limit are used
+// for pagination.
+//
+// http://developer.openstack.org/api-ref-image-v2.html
+type ListOpts struct {
+	// Integer value for the limit of values to return.
+	Limit int `q:"limit"`
+
+	// UUID of the image at which you want to set a marker.
+	Marker string `q:"marker"`
+
+	// Name filters on the name of the image.
+	Name string `q:"name"`
+
+	// Visibility filters on the visibility of the image.
+	Visibility ImageVisibility `q:"visibility"`
+
+	// MemberStatus filters on the member status of the image.
+	MemberStatus ImageMemberStatus `q:"member_status"`
+
+	// Owner filters on the project ID of the image.
+	Owner string `q:"owner"`
+
+	// Status filters on the status of the image.
+	Status ImageStatus `q:"status"`
+
+	// SizeMin filters on the size_min image property.
+	SizeMin int64 `q:"size_min"`
+
+	// SizeMax filters on the size_max image property.
+	SizeMax int64 `q:"size_max"`
+
+	// SortKey will sort the results based on a specified image property.
+	SortKey string `q:"sort_key"`
+
+	// SortDir will sort the list results either ascending or descending.
+	SortDir string `q:"sort_dir"`
+
+	// Tag filters on a user-defined image tag.
+	Tag string `q:"tag"`
+}
+
+// ToImageListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToImageListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List implements the image list request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToImageListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return ImagePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add parameters to the Create request.
+type CreateOptsBuilder interface {
+	// Returns value that can be passed to json.Marshal
+	ToImageCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents options used to create an image.
+type CreateOpts struct {
+	// Name is the name of the new image.
+	Name string `json:"name" required:"true"`
+
+	// ID is the image ID.
+	ID string `json:"id,omitempty"`
+
+	// Visibility defines who can see/use the image.
+	Visibility *ImageVisibility `json:"visibility,omitempty"`
+
+	// Tags is a set of image tags.
+	Tags []string `json:"tags,omitempty"`
+
+	// ContainerFormat is the format of the
+	// container. Valid values are ami, ari, aki, bare, and ovf.
+	ContainerFormat string `json:"container_format,omitempty"`
+
+	// DiskFormat is the format of the disk. If set,
+	// valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
+	DiskFormat string `json:"disk_format,omitempty"`
+
+	// MinDisk is the amount of disk space in
+	// GB that is required to boot the image.
+	MinDisk int `json:"min_disk,omitempty"`
+
+	// MinRAM is the amount of RAM in MB that
+	// is required to boot the image.
+	MinRAM int `json:"min_ram,omitempty"`
+
+	// Protected is whether the image is not deletable.
+	Protected *bool `json:"protected,omitempty"`
+
+	// Properties is a set of properties, if any, that
+	// are associated with the image.
+	Properties map[string]string `json:"-"`
+}
+
+// ToImageCreateMap assembles a request body based on the contents of
+// a CreateOpts.
+func (opts CreateOpts) ToImageCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Properties != nil {
+		for k, v := range opts.Properties {
+			b[k] = v
+		}
+	}
+	return b, nil
+}
+
+// Create implements the image create request.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToImageCreateMap()
+	if err != nil {
+		r.Err = err
+		return r
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{201}})
+	return
+}
+
+// Delete implements the image delete request.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get implements the image get request.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// Update implements the image update request.
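Because UpdateOpts (defined after Update below) is simply a slice of Patch values, several properties can be modified in a single PATCH request; a sketch, assuming imageClient is an initialized image service client and the image ID is hypothetical:

	updateOpts := images.UpdateOpts{
		images.ReplaceImageName{NewName: "new_name"},
		images.ReplaceImageTags{NewTags: []string{"base", "tested"}},
		images.UpdateVisibility{Visibility: images.ImageVisibilityShared},
	}

	image, err := images.Update(imageClient, "1bea47ed-f6a9-463b-b423-14b9cca9ad27", updateOpts).Extract()
	if err != nil {
		panic(err)
	}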
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToImageUpdateMap()
+	if err != nil {
+		r.Err = err
+		return r
+	}
+	_, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes:     []int{200},
+		MoreHeaders: map[string]string{"Content-Type": "application/openstack-images-v2.1-json-patch"},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	// returns value implementing json.Marshaler which when marshaled matches
+	// the patch schema:
+	// http://specs.openstack.org/openstack/glance-specs/specs/api/v2/http-patch-image-api-v2.html
+	ToImageUpdateMap() ([]interface{}, error)
+}
+
+// UpdateOpts implements UpdateOptsBuilder.
+type UpdateOpts []Patch
+
+// ToImageUpdateMap assembles a request body based on the contents of
+// UpdateOpts.
+func (opts UpdateOpts) ToImageUpdateMap() ([]interface{}, error) {
+	m := make([]interface{}, len(opts))
+	for i, patch := range opts {
+		patchJSON := patch.ToImagePatchMap()
+		m[i] = patchJSON
+	}
+	return m, nil
+}
+
+// Patch represents a single update to an existing image. Multiple updates
+// to an image can be submitted at the same time.
+type Patch interface {
+	ToImagePatchMap() map[string]interface{}
+}
+
+// UpdateVisibility represents an updated visibility property request.
+type UpdateVisibility struct {
+	Visibility ImageVisibility
+}
+
+// ToImagePatchMap assembles a request body based on UpdateVisibility.
+func (u UpdateVisibility) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/visibility",
+		"value": u.Visibility,
+	}
+}
+
+// ReplaceImageName represents an updated image_name property request.
+type ReplaceImageName struct {
+	NewName string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageName.
+func (r ReplaceImageName) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/name",
+		"value": r.NewName,
+	}
+}
+
+// ReplaceImageChecksum represents an updated checksum property request.
+type ReplaceImageChecksum struct {
+	Checksum string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageChecksum.
+func (rc ReplaceImageChecksum) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/checksum",
+		"value": rc.Checksum,
	}
+}
+
+// ReplaceImageTags represents an updated tags property request.
+type ReplaceImageTags struct {
+	NewTags []string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageTags.
+func (r ReplaceImageTags) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/tags",
+		"value": r.NewTags,
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
new file mode 100644
index 00000000000..cd819ec9c82
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
@@ -0,0 +1,200 @@
+package images
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/internal"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Image represents an image found in the OpenStack Image service.
+type Image struct {
+	// ID is the image UUID.
+	ID string `json:"id"`
+
+	// Name is the human-readable display name for the image.
+	Name string `json:"name"`
+
+	// Status is the image status. It can be "queued" or "active";
+	// see imageservice/v2/images/types.go.
+	Status ImageStatus `json:"status"`
+
+	// Tags is a list of image tags. Tags are arbitrarily defined strings
+	// attached to an image.
+	Tags []string `json:"tags"`
+
+	// ContainerFormat is the format of the container.
+	// Valid values are ami, ari, aki, bare, and ovf.
+	ContainerFormat string `json:"container_format"`
+
+	// DiskFormat is the format of the disk.
+	// If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
+	DiskFormat string `json:"disk_format"`
+
+	// MinDiskGigabytes is the amount of disk space in GB that is required to
+	// boot the image.
+	MinDiskGigabytes int `json:"min_disk"`
+
+	// MinRAMMegabytes [optional] is the amount of RAM in MB that is required to
+	// boot the image.
+	MinRAMMegabytes int `json:"min_ram"`
+
+	// Owner is the tenant ID the image belongs to.
+	Owner string `json:"owner"`
+
+	// Protected is whether the image is deletable or not.
+	Protected bool `json:"protected"`
+
+	// Visibility defines who can see/use the image.
+	Visibility ImageVisibility `json:"visibility"`
+
+	// Checksum is the checksum of the data that's associated with the image.
+	Checksum string `json:"checksum"`
+
+	// SizeBytes is the size of the data that's associated with the image.
+	SizeBytes int64 `json:"size"`
+
+	// Metadata is a set of metadata associated with the image.
+	// Image metadata allows for meaningfully defining the image properties
+	// and tags.
+	// See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
+	Metadata map[string]string `json:"metadata"`
+
+	// Properties is a set of key-value pairs, if any, that are associated with
+	// the image.
+	Properties map[string]interface{} `json:"-"`
+
+	// CreatedAt is the date when the image has been created.
+	CreatedAt time.Time `json:"created_at"`
+
+	// UpdatedAt is the date when the last change has been made to the image or
+	// its properties.
+	UpdatedAt time.Time `json:"updated_at"`
+
+	// File is the trailing path after the glance endpoint that represent the
+	// location of the image or the path to retrieve it.
+	File string `json:"file"`
+
+	// Schema is the path to the JSON-schema that represent the image or image
+	// entity.
+	Schema string `json:"schema"`
+
+	// VirtualSize is the virtual size of the image.
+	VirtualSize int64 `json:"virtual_size"`
+}
+
+func (r *Image) UnmarshalJSON(b []byte) error {
+	type tmp Image
+	var s struct {
+		tmp
+		SizeBytes interface{} `json:"size"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Image(s.tmp)
+
+	switch t := s.SizeBytes.(type) {
+	case nil:
+		return nil
+	case float32:
+		r.SizeBytes = int64(t)
+	case float64:
+		r.SizeBytes = int64(t)
+	default:
+		return fmt.Errorf("Unknown type for SizeBytes: %v (value: %v)", reflect.TypeOf(t), t)
+	}
+
+	// Bundle all other fields into Properties
+	var result interface{}
+	err = json.Unmarshal(b, &result)
+	if err != nil {
+		return err
+	}
+	if resultMap, ok := result.(map[string]interface{}); ok {
+		delete(resultMap, "self")
+		r.Properties = internal.RemainingKeys(Image{}, resultMap)
+	}
+
+	return err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any commonResult as an Image.
+func (r commonResult) Extract() (*Image, error) {
+	var s *Image
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// CreateResult represents the result of a Create operation. Call its Extract
+// method to interpret it as an Image.
+type CreateResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an Update operation. Call its Extract
+// method to interpret it as an Image.
+type UpdateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a Get operation. Call its Extract
+// method to interpret it as an Image.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// ImagePage represents the results of a List request.
+type ImagePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if an ImagePage contains no Image results.
+func (r ImagePage) IsEmpty() (bool, error) {
+	images, err := ExtractImages(r)
+	return len(images) == 0, err
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to
+// the next page of results.
+func (r ImagePage) NextPageURL() (string, error) {
+	var s struct {
+		Next string `json:"next"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+
+	if s.Next == "" {
+		return "", nil
+	}
+
+	return nextPageURL(r.URL.String(), s.Next)
+}
+
+// ExtractImages interprets the results of a single page from a List() call,
+// producing a slice of Image entities.
+func ExtractImages(r pagination.Page) ([]Image, error) {
+	var s struct {
+		Images []Image `json:"images"`
+	}
+	err := (r.(ImagePage)).ExtractInto(&s)
+	return s.Images, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go
new file mode 100644
index 00000000000..2e01b38f5c4
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go
@@ -0,0 +1,79 @@
+package images
+
+// ImageStatus represents the status of an image. See
+// http://docs.openstack.org/developer/glance/statuses.html
+type ImageStatus string
+
+const (
+	// ImageStatusQueued is a status for an image whose identifier has
+	// been reserved for it in the image registry.
+	ImageStatusQueued ImageStatus = "queued"
+
+	// ImageStatusSaving denotes that an image’s raw data is currently being
+	// uploaded to Glance.
+	ImageStatusSaving ImageStatus = "saving"
+
+	// ImageStatusActive denotes an image that is fully available in Glance.
+	ImageStatusActive ImageStatus = "active"
+
+	// ImageStatusKilled denotes that an error occurred during the uploading
+	// of an image’s data, and that the image is not readable.
+	ImageStatusKilled ImageStatus = "killed"
+
+	// ImageStatusDeleted is used for an image that is no longer available to
+	// use. The image information is retained in the image registry.
+	ImageStatusDeleted ImageStatus = "deleted"
+
+	// ImageStatusPendingDelete is similar to ImageStatusDeleted, but the
+	// image data has not yet been removed.
+	ImageStatusPendingDelete ImageStatus = "pending_delete"
+
+	// ImageStatusDeactivated denotes that access to image data is not allowed
+	// for any non-admin user.
+	ImageStatusDeactivated ImageStatus = "deactivated"
+)
+
+// ImageVisibility denotes an image's visibility, which determines who can
+// see and use the image.
+// For the design background, see
+// https://wiki.openstack.org/wiki/Glance-v2-community-image-visibility-design
+type ImageVisibility string
+
+const (
+	// ImageVisibilityPublic means the image is visible to all users.
+	ImageVisibilityPublic ImageVisibility = "public"
+
+	// ImageVisibilityPrivate means the image is visible only to users with
+	// tenantId == tenantId(owner).
+	ImageVisibilityPrivate ImageVisibility = "private"
+
+	// ImageVisibilityShared images are visible to:
+	// - users with tenantId == tenantId(owner)
+	// - users with tenantId in the member-list of the image
+	// - users with tenantId in the member-list with member_status == 'accepted'
+	ImageVisibilityShared ImageVisibility = "shared"
+
+	// ImageVisibilityCommunity images:
+	// - all users can see and boot it
+	// - users with tenantId in the member-list of the image with
+	//   member_status == 'accepted' have this image in their default image-list.
+	ImageVisibilityCommunity ImageVisibility = "community"
+)
+
+// ImageMemberStatus is a status for adding a new member (tenant) to an image
+// member list.
+type ImageMemberStatus string
+
+const (
+	// ImageMemberStatusAccepted is the status for an accepted image member.
+	ImageMemberStatusAccepted ImageMemberStatus = "accepted"
+
+	// ImageMemberStatusPending shows that the member addition is pending.
+	ImageMemberStatusPending ImageMemberStatus = "pending"
+
+	// ImageMemberStatusRejected is the status for a rejected image member.
+	ImageMemberStatusRejected ImageMemberStatus = "rejected"
+
+	// ImageMemberStatusAll matches image members in any status.
+	ImageMemberStatusAll ImageMemberStatus = "all"
+)
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go
new file mode 100644
index 00000000000..bf7cea1ef89
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go
@@ -0,0 +1,51 @@
+package images
+
+import (
+	"net/url"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// `listURL` is a pure function. `listURL(c)` is a URL for which a GET
+// request will respond with a list of images in the service `c`.
+func listURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("images")
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("images")
+}
+
+// `imageURL(c,i)` is the URL for the image identified by ID `i` in
+// the service `c`.
+func imageURL(c *gophercloud.ServiceClient, imageID string) string {
+	return c.ServiceURL("images", imageID)
+}
+
+// `getURL(c,i)` is a URL for which a GET request will respond with
+// information about the image identified by ID `i` in the service
+// `c`.
+func getURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +func updateURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +func deleteURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +// builds next page full url based on current url +func nextPageURL(currentURL string, next string) (string, error) { + base, err := url.Parse(currentURL) + if err != nil { + return "", err + } + rel, err := url.Parse(next) + if err != nil { + return "", err + } + return base.ResolveReference(rel).String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go new file mode 100644 index 00000000000..c01070edc8c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go @@ -0,0 +1,60 @@ +/* +Package firewalls allows management and retrieval of firewalls from the +OpenStack Networking Service. + +Example to List Firewalls + + listOpts := firewalls.ListOpts{ + TenantID: "tenant-id", + } + + allPages, err := firewalls.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFirewalls, err := firewalls.ExtractFirewalls(allPages) + if err != nil { + panic(err) + } + + for _, fw := range allFirewalls { + fmt.Printf("%+v\n", fw) + } + +Example to Create a Firewall + + createOpts := firewalls.CreateOpts{ + Name: "firewall_1", + Description: "A firewall", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + AdminStateUp: gophercloud.Enabled, + } + + firewall, err := firewalls.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Firewall + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + + updateOpts := firewalls.UpdateOpts{ + AdminStateUp: gophercloud.Disabled, + } + + firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Firewall + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + err := firewalls.Delete(networkClient, firewallID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package firewalls diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go new file mode 100644 index 00000000000..dd92bb20dbe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go @@ -0,0 +1,11 @@ +package firewalls + +import "fmt" + +func err(str string) error { + return fmt.Errorf("%s", str) +} + +var ( + errPolicyRequired = err("A policy ID is required") +) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go new file mode 100644 index 00000000000..aa30194668a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go @@ -0,0 +1,137 @@ +package firewalls + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List 
request. +type ListOptsBuilder interface { + ToFirewallListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall attributes you want to see returned. SortKey allows you to sort +// by a particular firewall attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + AdminStateUp bool `q:"admin_state_up"` + Shared bool `q:"shared"` + PolicyID string `q:"firewall_policy_id"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToFirewallListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToFirewallListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// firewalls. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewalls that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToFirewallListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return FirewallPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToFirewallCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall. +type CreateOpts struct { + PolicyID string `json:"firewall_policy_id" required:"true"` + // TenantID specifies a tenant to own the firewall. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Shared *bool `json:"shared,omitempty"` +} + +// ToFirewallCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "firewall") +} + +// Create accepts a CreateOpts struct and uses the values to create a new firewall. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFirewallCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular firewall based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
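+//
+// A usage sketch (assuming an existing firewall ID in firewallID; note that
+// UpdateOpts marks PolicyID as required, so updates must always resend the
+// policy ID):
+//
+//	updateOpts := firewalls.UpdateOpts{
+//		PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c",
+//	}
+//
+//	fw, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract()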
+type UpdateOptsBuilder interface {
+	ToFirewallUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall.
+type UpdateOpts struct {
+	PolicyID     string `json:"firewall_policy_id" required:"true"`
+	Name         string `json:"name,omitempty"`
+	Description  string `json:"description,omitempty"`
+	AdminStateUp *bool  `json:"admin_state_up,omitempty"`
+	Shared       *bool  `json:"shared,omitempty"`
+}
+
+// ToFirewallUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall")
+}
+
+// Update allows firewalls to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToFirewallUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular firewall based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go
new file mode 100644
index 00000000000..f6786a44331
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go
@@ -0,0 +1,95 @@
+package firewalls
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Firewall is an OpenStack firewall.
+type Firewall struct {
+	ID           string `json:"id"`
+	Name         string `json:"name"`
+	Description  string `json:"description"`
+	AdminStateUp bool   `json:"admin_state_up"`
+	Status       string `json:"status"`
+	PolicyID     string `json:"firewall_policy_id"`
+	TenantID     string `json:"tenant_id"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall.
+func (r commonResult) Extract() (*Firewall, error) {
+	var s Firewall
+	err := r.ExtractInto(&s)
+	return &s, err
+}
+
+func (r commonResult) ExtractInto(v interface{}) error {
+	return r.Result.ExtractIntoStructPtr(v, "firewall")
+}
+
+// ExtractFirewallsInto extracts a page of firewalls into a caller-supplied
+// slice, which may embed extension fields (see the routerinsertion package).
+func ExtractFirewallsInto(r pagination.Page, v interface{}) error {
+	return r.(FirewallPage).Result.ExtractIntoSlicePtr(v, "firewalls")
+}
+
+// FirewallPage is the page returned by a pager when traversing over a
+// collection of firewalls.
+type FirewallPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewalls has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r FirewallPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"firewalls_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a FirewallPage struct is empty.
+func (r FirewallPage) IsEmpty() (bool, error) {
+	is, err := ExtractFirewalls(r)
+	return len(is) == 0, err
+}
+
+// ExtractFirewalls accepts a Page struct, specifically a FirewallPage struct,
+// and extracts the elements into a slice of Firewall structs.
In other words, +// a generic collection is mapped into a relevant slice. +func ExtractFirewalls(r pagination.Page) ([]Firewall, error) { + var s []Firewall + err := ExtractFirewallsInto(r, &s) + return s, err +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret it as a Firewall. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret it as a Firewall. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret it as a Firewall. +type CreateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go new file mode 100644 index 00000000000..807ea1ab656 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go @@ -0,0 +1,16 @@ +package firewalls + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewalls" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go new file mode 100644 index 00000000000..ae824491f18 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go @@ -0,0 +1,84 @@ +/* +Package policies allows management and retrieval of Firewall Policies in the +OpenStack Networking Service. 
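+
+Example to Get a Policy (a usage sketch, assuming an existing policy ID)
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+
+	policy, err := policies.Get(networkClient, policyID).Extract()
+	if err != nil {
+		panic(err)
+	}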
+
+Example to List Policies
+
+	listOpts := policies.ListOpts{
+		TenantID: "966b3c7d36a24facaf20b7e458bf2192",
+	}
+
+	allPages, err := policies.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := policies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, policy := range allPolicies {
+		fmt.Printf("%+v\n", policy)
+	}
+
+Example to Create a Policy
+
+	createOpts := policies.CreateOpts{
+		Name:        "policy_1",
+		Description: "A policy",
+		Rules: []string{
+			"98a58c87-76be-ae7c-a74e-b77fffb88d95",
+			"7c4f087a-ed46-4ea8-8040-11ca460a61c0",
+		},
+	}
+
+	policy, err := policies.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+
+	updateOpts := policies.UpdateOpts{
+		Description: "New Description",
+	}
+
+	policy, err := policies.Update(networkClient, policyID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+	err := policies.Delete(networkClient, policyID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Add a Rule to a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+	ruleOpts := policies.InsertRuleOpts{
+		ID: "98a58c87-76be-ae7c-a74e-b77fffb88d95",
+	}
+
+	policy, err := policies.AddRule(networkClient, policyID, ruleOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Rule from a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+	ruleID := "98a58c87-76be-ae7c-a74e-b77fffb88d95"
+
+	policy, err := policies.RemoveRule(networkClient, policyID, ruleID).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package policies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go
new file mode 100644
index 00000000000..b1a6a5598b6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go
@@ -0,0 +1,177 @@
+package policies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToPolicyListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the firewall policy attributes you want to see returned. SortKey allows you
+// to sort by a particular firewall policy attribute. SortDir sets the direction,
+// and is either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	TenantID    string `q:"tenant_id"`
+	Name        string `q:"name"`
+	Description string `q:"description"`
+	Shared      *bool  `q:"shared"`
+	Audited     *bool  `q:"audited"`
+	ID          string `q:"id"`
+	Limit       int    `q:"limit"`
+	Marker      string `q:"marker"`
+	SortKey     string `q:"sort_key"`
+	SortDir     string `q:"sort_dir"`
+}
+
+// ToPolicyListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToPolicyListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// firewall policies.
It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those firewall policies that are owned by
+// the tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToPolicyListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return PolicyPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToFirewallPolicyCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new firewall policy.
+type CreateOpts struct {
+	// TenantID specifies a tenant to own the firewall. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID    string   `json:"tenant_id,omitempty"`
+	Name        string   `json:"name,omitempty"`
+	Description string   `json:"description,omitempty"`
+	Shared      *bool    `json:"shared,omitempty"`
+	Audited     *bool    `json:"audited,omitempty"`
+	Rules       []string `json:"firewall_rules,omitempty"`
+}
+
+// ToFirewallPolicyCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall_policy")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// firewall policy.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToFirewallPolicyCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular firewall policy based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToFirewallPolicyUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall policy.
+type UpdateOpts struct {
+	Name        string   `json:"name,omitempty"`
+	Description string   `json:"description,omitempty"`
+	Shared      *bool    `json:"shared,omitempty"`
+	Audited     *bool    `json:"audited,omitempty"`
+	Rules       []string `json:"firewall_rules,omitempty"`
+}
+
+// ToFirewallPolicyUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToFirewallPolicyUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall_policy")
+}
+
+// Update allows firewall policies to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToFirewallPolicyUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular firewall policy based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// InsertRuleOptsBuilder allows extensions to add additional parameters to the
+// InsertRule request.
+type InsertRuleOptsBuilder interface {
+	ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error)
+}
+
+// InsertRuleOpts contains the values used when updating a policy's rules.
+type InsertRuleOpts struct {
+	ID           string `json:"firewall_rule_id" required:"true"`
+	BeforeRuleID string `json:"insert_before,omitempty"`
+	AfterRuleID  string `json:"insert_after,omitempty"`
+}
+
+// ToFirewallPolicyInsertRuleMap casts an InsertRuleOpts struct to a map.
+func (opts InsertRuleOpts) ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// AddRule will add a rule to a policy.
+func AddRule(c *gophercloud.ServiceClient, id string, opts InsertRuleOptsBuilder) (r InsertRuleResult) {
+	b, err := opts.ToFirewallPolicyInsertRuleMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(insertURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// RemoveRule will remove a rule from a policy.
+func RemoveRule(c *gophercloud.ServiceClient, id, ruleID string) (r RemoveRuleResult) {
+	b := map[string]interface{}{"firewall_rule_id": ruleID}
+	_, r.Err = c.Put(removeURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go
new file mode 100644
index 00000000000..bbe22b13612
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go
@@ -0,0 +1,103 @@
+package policies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Policy is a firewall policy.
+type Policy struct {
+	ID          string   `json:"id"`
+	Name        string   `json:"name"`
+	Description string   `json:"description"`
+	TenantID    string   `json:"tenant_id"`
+	Audited     bool     `json:"audited"`
+	Shared      bool     `json:"shared"`
+	Rules       []string `json:"firewall_rules,omitempty"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall policy.
+func (r commonResult) Extract() (*Policy, error) {
+	var s struct {
+		Policy *Policy `json:"firewall_policy"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Policy, err
+}
+
+// PolicyPage is the page returned by a pager when traversing over a
+// collection of firewall policies.
+type PolicyPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewall policies has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r PolicyPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"firewall_policies_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PolicyPage struct is empty.
+func (r PolicyPage) IsEmpty() (bool, error) {
+	is, err := ExtractPolicies(r)
+	return len(is) == 0, err
+}
+
+// ExtractPolicies accepts a Page struct, specifically a PolicyPage struct,
+// and extracts the elements into a slice of Policy structs.
In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPolicies(r pagination.Page) ([]Policy, error) { + var s struct { + Policies []Policy `json:"firewall_policies"` + } + err := (r.(PolicyPage)).ExtractInto(&s) + return s.Policies, err +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Policy. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its +// Extract method to interpret it as a Policy. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Policy. +type CreateResult struct { + commonResult +} + +// InsertRuleResult represents the result of an InsertRule operation. Call its +// Extract method to interpret it as a Policy. +type InsertRuleResult struct { + commonResult +} + +// RemoveRuleResult represents the result of a RemoveRule operation. Call its +// Extract method to interpret it as a Policy. +type RemoveRuleResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go new file mode 100644 index 00000000000..c252b79dd0c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go @@ -0,0 +1,26 @@ +package policies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewall_policies" + insertPath = "insert_rule" + removePath = "remove_rule" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func insertURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, insertPath) +} + +func removeURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, removePath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go new file mode 100644 index 00000000000..4f0a779eec9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go @@ -0,0 +1,68 @@ +/* +Package routerinsertion implements the fwaasrouterinsertion Firewall extension. +It is used to manage the router information of a firewall. 
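+
+Example to Get a Firewall with Router Information (a sketch, assuming an
+existing firewall ID in firewallID)
+
+	var fw struct {
+		firewalls.Firewall
+		routerinsertion.FirewallExt
+	}
+
+	err := firewalls.Get(networkClient, firewallID).ExtractInto(&fw)
+	if err != nil {
+		panic(err)
+	}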
+ +Example to List Firewalls with Router Information + + type FirewallsWithRouters struct { + firewalls.Firewall + routerinsertion.FirewallExt + } + + var allFirewalls []FirewallsWithRouters + + allPages, err := firewalls.List(networkClient, nil).AllPages() + if err != nil { + panic(err) + } + + err = firewalls.ExtractFirewallsInto(allPages, &allFirewalls) + if err != nil { + panic(err) + } + + for _, fw := range allFirewalls { + fmt.Printf("%+v\n", fw) + } + +Example to Create a Firewall with a Router + + firewallCreateOpts := firewalls.CreateOpts{ + Name: "firewall_1", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + createOpts := routerinsertion.CreateOptsExt{ + CreateOptsBuilder: firewallCreateOpts, + RouterIDs: []string{ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", + }, + } + + firewall, err := firewalls.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Firewall with a Router + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + + firewallUpdateOpts := firewalls.UpdateOpts{ + Description: "updated firewall", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + updateOpts := routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: firewallUpdateOpts, + RouterIDs: []string{ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", + }, + } + + firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() + if err != nil { + panic(err) + } +*/ +package routerinsertion diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go new file mode 100644 index 00000000000..b1f6d76e387 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go @@ -0,0 +1,43 @@ +package routerinsertion + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" +) + +// CreateOptsExt adds the RouterIDs option to the base CreateOpts. +type CreateOptsExt struct { + firewalls.CreateOptsBuilder + RouterIDs []string `json:"router_ids"` +} + +// ToFirewallCreateMap adds router_ids to the base firewall creation options. +func (opts CreateOptsExt) ToFirewallCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToFirewallCreateMap() + if err != nil { + return nil, err + } + + firewallMap := base["firewall"].(map[string]interface{}) + firewallMap["router_ids"] = opts.RouterIDs + + return base, nil +} + +// UpdateOptsExt adds the RouterIDs option to the base UpdateOpts. +type UpdateOptsExt struct { + firewalls.UpdateOptsBuilder + RouterIDs []string `json:"router_ids"` +} + +// ToFirewallUpdateMap adds router_ids to the base firewall update options. 
+func (opts UpdateOptsExt) ToFirewallUpdateMap() (map[string]interface{}, error) {
+	base, err := opts.UpdateOptsBuilder.ToFirewallUpdateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	firewallMap := base["firewall"].(map[string]interface{})
+	firewallMap["router_ids"] = opts.RouterIDs
+
+	return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go
new file mode 100644
index 00000000000..85c288e51e6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go
@@ -0,0 +1,7 @@
+package routerinsertion
+
+// FirewallExt is an extension to the base Firewall object.
+type FirewallExt struct {
+	// RouterIDs are the routers that the firewall is attached to.
+	RouterIDs []string `json:"router_ids"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go
new file mode 100644
index 00000000000..3351a3e5c95
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go
@@ -0,0 +1,64 @@
+/*
+Package rules enables management and retrieval of Firewall Rules in the
+OpenStack Networking Service.
+
+Example to List Rules
+
+	listOpts := rules.ListOpts{
+		Protocol: rules.ProtocolAny,
+	}
+
+	allPages, err := rules.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRules, err := rules.ExtractRules(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, rule := range allRules {
+		fmt.Printf("%+v\n", rule)
+	}
+
+Example to Create a Rule
+
+	createOpts := rules.CreateOpts{
+		Action:               "allow",
+		Protocol:             rules.ProtocolTCP,
+		Description:          "ssh",
+		DestinationPort:      "22",
+		DestinationIPAddress: "192.168.1.0/24",
+	}
+
+	rule, err := rules.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Rule
+
+	ruleID := "f03bd950-6c56-4f5e-a307-45967078f507"
+	newPort := "80"
+	newDescription := "http"
+
+	updateOpts := rules.UpdateOpts{
+		Description:     &newDescription,
+		DestinationPort: &newPort,
+	}
+
+	rule, err := rules.Update(networkClient, ruleID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Rule
+
+	ruleID := "f03bd950-6c56-4f5e-a307-45967078f507"
+	err := rules.Delete(networkClient, ruleID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package rules
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
new file mode 100644
index 00000000000..0b29d39fd9e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
@@ -0,0 +1,12 @@
+package rules
+
+import "fmt"
+
+func err(str string) error {
+	return fmt.Errorf("%s", str)
+}
+
+var (
+	errProtocolRequired = err("A protocol is required (tcp, udp, icmp or any)")
+	errActionRequired   = err("An action is required (allow or deny)")
+)
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
new file mode 100644
index
00000000000..83bbe99b6d2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go @@ -0,0 +1,188 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type ( + // Protocol represents a valid rule protocol. + Protocol string +) + +const ( + // ProtocolAny is to allow any protocol. + ProtocolAny Protocol = "any" + + // ProtocolICMP is to allow the ICMP protocol. + ProtocolICMP Protocol = "icmp" + + // ProtocolTCP is to allow the TCP protocol. + ProtocolTCP Protocol = "tcp" + + // ProtocolUDP is to allow the UDP protocol. + ProtocolUDP Protocol = "udp" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToRuleListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Firewall rule attributes you want to see returned. SortKey allows you to +// sort by a particular firewall rule attribute. SortDir sets the direction, and +// is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + Protocol string `q:"protocol"` + Action string `q:"action"` + IPVersion int `q:"ip_version"` + SourceIPAddress string `q:"source_ip_address"` + DestinationIPAddress string `q:"destination_ip_address"` + SourcePort string `q:"source_port"` + DestinationPort string `q:"destination_port"` + Enabled bool `q:"enabled"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToRuleListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRuleListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall rules that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + + if opts != nil { + query, err := opts.ToRuleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return RulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall rule. 
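+//
+// A creation sketch (assuming an authenticated networking client): when
+// Protocol is ProtocolAny, ToRuleCreateMap below sends a JSON null protocol,
+// which is how the API expresses "any":
+//
+//	createOpts := rules.CreateOpts{
+//		Action:   "allow",
+//		Protocol: rules.ProtocolAny,
+//	}
+//
+//	rule, err := rules.Create(networkClient, createOpts).Extract()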
+type CreateOpts struct {
+	Protocol             Protocol              `json:"protocol" required:"true"`
+	Action               string                `json:"action" required:"true"`
+	TenantID             string                `json:"tenant_id,omitempty"`
+	Name                 string                `json:"name,omitempty"`
+	Description          string                `json:"description,omitempty"`
+	IPVersion            gophercloud.IPVersion `json:"ip_version,omitempty"`
+	SourceIPAddress      string                `json:"source_ip_address,omitempty"`
+	DestinationIPAddress string                `json:"destination_ip_address,omitempty"`
+	SourcePort           string                `json:"source_port,omitempty"`
+	DestinationPort      string                `json:"destination_port,omitempty"`
+	Shared               *bool                 `json:"shared,omitempty"`
+	Enabled              *bool                 `json:"enabled,omitempty"`
+}
+
+// ToRuleCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "firewall_rule")
+	if err != nil {
+		return nil, err
+	}
+
+	// The API represents "any protocol" as a null protocol value.
+	if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" {
+		m["protocol"] = nil
+	}
+
+	return b, nil
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// firewall rule.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToRuleCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular firewall rule based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall rule.
+// These fields are all pointers so that unset fields will not cause the
+// existing Rule attribute to be removed.
+type UpdateOpts struct {
+	Protocol             *string                `json:"protocol,omitempty"`
+	Action               *string                `json:"action,omitempty"`
+	Name                 *string                `json:"name,omitempty"`
+	Description          *string                `json:"description,omitempty"`
+	IPVersion            *gophercloud.IPVersion `json:"ip_version,omitempty"`
+	SourceIPAddress      *string                `json:"source_ip_address,omitempty"`
+	DestinationIPAddress *string                `json:"destination_ip_address,omitempty"`
+	SourcePort           *string                `json:"source_port,omitempty"`
+	DestinationPort      *string                `json:"destination_port,omitempty"`
+	Shared               *bool                  `json:"shared,omitempty"`
+	Enabled              *bool                  `json:"enabled,omitempty"`
+}
+
+// ToRuleUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToRuleUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall_rule")
+}
+
+// Update allows firewall rules to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToRuleUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular firewall rule based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go new file mode 100644 index 00000000000..1af03e573d9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go @@ -0,0 +1,99 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Rule represents a firewall rule. +type Rule struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Protocol string `json:"protocol"` + Action string `json:"action"` + IPVersion int `json:"ip_version,omitempty"` + SourceIPAddress string `json:"source_ip_address,omitempty"` + DestinationIPAddress string `json:"destination_ip_address,omitempty"` + SourcePort string `json:"source_port,omitempty"` + DestinationPort string `json:"destination_port,omitempty"` + Shared bool `json:"shared,omitempty"` + Enabled bool `json:"enabled,omitempty"` + PolicyID string `json:"firewall_policy_id"` + Position int `json:"position"` + TenantID string `json:"tenant_id"` +} + +// RulePage is the page returned by a pager when traversing over a +// collection of firewall rules. +type RulePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of firewall rules has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r RulePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"firewall_rules_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a RulePage struct is empty. +func (r RulePage) IsEmpty() (bool, error) { + is, err := ExtractRules(r) + return len(is) == 0, err +} + +// ExtractRules accepts a Page struct, specifically a RulePage struct, +// and extracts the elements into a slice of Rule structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRules(r pagination.Page) ([]Rule, error) { + var s struct { + Rules []Rule `json:"firewall_rules"` + } + err := (r.(RulePage)).ExtractInto(&s) + return s.Rules, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a firewall rule. +func (r commonResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"firewall_rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// GetResult represents the result of a get operation. Call its Extract method +// to interpret it as a Rule. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Rule. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Rule. 
+type CreateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go
new file mode 100644
index 00000000000..79654be73e2
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go
@@ -0,0 +1,16 @@
+package rules
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "fw"
+	resourcePath = "firewall_rules"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go
new file mode 100644
index 00000000000..bf5ec6807cc
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go
@@ -0,0 +1,71 @@
+/*
+Package floatingips enables management and retrieval of Floating IPs from the
+OpenStack Networking service.
+
+Example to List Floating IPs
+
+	listOpts := floatingips.ListOpts{
+		FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee",
+	}
+
+	allPages, err := floatingips.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allFIPs, err := floatingips.ExtractFloatingIPs(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, fip := range allFIPs {
+		fmt.Printf("%+v\n", fip)
+	}
+
+Example to Create a Floating IP
+
+	createOpts := floatingips.CreateOpts{
+		FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee",
+	}
+
+	fip, err := floatingips.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Floating IP
+
+	fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+	portID := "76d0a61b-b8e5-490c-9892-4cf674f2bec8"
+
+	updateOpts := floatingips.UpdateOpts{
+		PortID: &portID,
+	}
+
+	fip, err := floatingips.Update(networkClient, fipID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Disassociate a Floating IP from a Port
+
+	fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+
+	updateOpts := floatingips.UpdateOpts{
+		PortID: nil,
+	}
+
+	fip, err := floatingips.Update(networkClient, fipID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Floating IP
+
+	fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+	err := floatingips.Delete(networkClient, fipID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package floatingips
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
new file mode 100644
index 00000000000..1c8a8b2f13f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
@@ -0,0 +1,147 @@
+package floatingips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API.
Filtering is achieved by passing in struct field values that map to
+// the floating IP attributes you want to see returned. SortKey allows you to
+// sort by a particular network attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID                string `q:"id"`
+	FloatingNetworkID string `q:"floating_network_id"`
+	PortID            string `q:"port_id"`
+	FixedIP           string `q:"fixed_ip_address"`
+	FloatingIP        string `q:"floating_ip_address"`
+	TenantID          string `q:"tenant_id"`
+	Limit             int    `q:"limit"`
+	Marker            string `q:"marker"`
+	SortKey           string `q:"sort_key"`
+	SortDir           string `q:"sort_dir"`
+	RouterID          string `q:"router_id"`
+	Status            string `q:"status"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// floating IP resources. It accepts a ListOpts struct, which allows you to
+// filter and sort the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToFloatingIPCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new floating IP
+// resource. The only required field is FloatingNetworkID, which refers to
+// the external network the floating IP is created on; the internal PortID
+// to associate with is optional.
+type CreateOpts struct {
+	FloatingNetworkID string `json:"floating_network_id" required:"true"`
+	FloatingIP        string `json:"floating_ip_address,omitempty"`
+	PortID            string `json:"port_id,omitempty"`
+	FixedIP           string `json:"fixed_ip_address,omitempty"`
+	TenantID          string `json:"tenant_id,omitempty"`
+}
+
+// ToFloatingIPCreateMap allows CreateOpts to satisfy the CreateOptsBuilder
+// interface.
+func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "floatingip")
+}
+
+// Create accepts a CreateOpts struct and uses the values provided to create a
+// new floating IP resource. You can create floating IPs on external networks
+// only. If you provide a FloatingNetworkID which refers to a network that is
+// not external (i.e. its `router:external' attribute is False), the operation
+// will fail and return a 400 error.
+//
+// If you do not specify a FloatingIP address value, the operation will
+// automatically allocate an available address for the new resource. If you do
+// choose to specify one, it must fall within the subnet range for the external
+// network - otherwise the operation returns a 400 error. If the FloatingIP
+// address is already in use, the operation returns a 409 error code.
+//
+// You can associate the new resource with an internal port by using the PortID
+// field. If you specify a PortID that is not valid, the operation will fail and
+// return a 404 error code.
+//
+// You must also configure an IP address for the port associated with the PortID
+// you have provided - this is what the FixedIP refers to: an IP fixed to a
+// port.
Because a port might be associated with multiple IP addresses, you can
+// use the FixedIP field to associate a particular IP address rather than have
+// the API choose one for you. If you specify an IP address that is not valid,
+// the operation will fail and return a 400 error code. If the PortID and
+// FixedIP are already associated with another resource, the operation will
+// fail and return a 409 error code.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToFloatingIPCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular floating IP resource based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToFloatingIPUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a floating IP resource. The
+// only value that can be updated is which internal port the floating IP is
+// linked to. To associate the floating IP with a new internal port, provide its
+// ID. To disassociate the floating IP from all ports, provide an empty string.
+type UpdateOpts struct {
+	PortID *string `json:"port_id"`
+}
+
+// ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder
+// interface.
+func (opts UpdateOpts) ToFloatingIPUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "floatingip")
+}
+
+// Update allows floating IP resources to be updated. Currently, the only way to
+// "update" a floating IP is to associate it with a new internal port, or
+// disassociate it from all ports. See UpdateOpts for instructions on how to
+// do this.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToFloatingIPUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular floating IP resource. Please
+// ensure this is what you want - you can also disassociate the IP from existing
+// internal ports.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
new file mode 100644
index 00000000000..f1af23d4a60
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
@@ -0,0 +1,116 @@
+package floatingips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// FloatingIP represents a floating IP resource. A floating IP is an external
+// IP address that is mapped to an internal port and, optionally, a specific
+// IP address on a private network. In other words, it enables access to an
+// instance on a private network from an external network.
+// floating IPs can only be defined on networks where the `router:external'
+// attribute (provided by the external network extension) is set to True.
+type FloatingIP struct {
+	// ID is the unique identifier for the floating IP instance.
+	ID string `json:"id"`
+
+	// FloatingNetworkID is the UUID of the external network where the floating
+	// IP is to be created.
+	FloatingNetworkID string `json:"floating_network_id"`
+
+	// FloatingIP is the address of the floating IP on the external network.
+	FloatingIP string `json:"floating_ip_address"`
+
+	// PortID is the UUID of the port on an internal network that is associated
+	// with the floating IP.
+	PortID string `json:"port_id"`
+
+	// FixedIP is the specific IP address of the internal port which should be
+	// associated with the floating IP.
+	FixedIP string `json:"fixed_ip_address"`
+
+	// TenantID is the owner of the floating IP. Only admin users can specify a
+	// tenant identifier other than their own.
+	TenantID string `json:"tenant_id"`
+
+	// Status is the condition of the API resource.
+	Status string `json:"status"`
+
+	// RouterID is the ID of the router used for this floating IP.
+	RouterID string `json:"router_id"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract will extract a FloatingIP resource from a result.
+func (r commonResult) Extract() (*FloatingIP, error) {
+	var s struct {
+		FloatingIP *FloatingIP `json:"floatingip"`
+	}
+	err := r.ExtractInto(&s)
+	return s.FloatingIP, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a FloatingIP.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a FloatingIP.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a FloatingIP.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// FloatingIPPage is the page returned by a pager when traversing over a
+// collection of floating IPs.
+type FloatingIPPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of floating IPs has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r FloatingIPPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"floatingips_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a FloatingIPPage struct is empty.
+func (r FloatingIPPage) IsEmpty() (bool, error) {
+	is, err := ExtractFloatingIPs(r)
+	return len(is) == 0, err
+}
+
+// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage
+// struct, and extracts the elements into a slice of FloatingIP structs. In
+// other words, a generic collection is mapped into a relevant slice.
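+//
+// A minimal usage sketch, not part of the upstream API docs (networkClient is
+// assumed to be an authenticated Neutron v2 client, and the port ID is
+// illustrative only):
+//
+//	pager := List(networkClient, ListOpts{PortID: "uuid-of-port"})
+//	allPages, err := pager.AllPages()
+//	if err == nil {
+//		fips, _ := ExtractFloatingIPs(allPages)
+//		for _, fip := range fips {
+//			fmt.Println(fip.FloatingIP)
+//		}
+//	}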
+func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) {
+	var s struct {
+		FloatingIPs []FloatingIP `json:"floatingips"`
+	}
+	err := (r.(FloatingIPPage)).ExtractInto(&s)
+	return s.FloatingIPs, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
new file mode 100644
index 00000000000..1318a184caa
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
@@ -0,0 +1,13 @@
+package floatingips
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "floatingips"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go
new file mode 100644
index 00000000000..6ede7f5e171
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go
@@ -0,0 +1,108 @@
+/*
+Package routers enables management and retrieval of Routers from the OpenStack
+Networking service.
+
+Example to List Routers
+
+	listOpts := routers.ListOpts{}
+	allPages, err := routers.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRouters, err := routers.ExtractRouters(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, router := range allRouters {
+		fmt.Printf("%+v\n", router)
+	}
+
+Example to Create a Router
+
+	iTrue := true
+	gwi := routers.GatewayInfo{
+		NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b",
+	}
+
+	createOpts := routers.CreateOpts{
+		Name:         "router_1",
+		AdminStateUp: &iTrue,
+		GatewayInfo:  &gwi,
+	}
+
+	router, err := routers.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Router
+
+	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+	routes := []routers.Route{{
+		DestinationCIDR: "40.0.1.0/24",
+		NextHop:         "10.1.0.10",
+	}}
+
+	updateOpts := routers.UpdateOpts{
+		Name:   "new_name",
+		Routes: routes,
+	}
+
+	router, err := routers.Update(networkClient, routerID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove all Routes from a Router
+
+	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+	routes := []routers.Route{}
+
+	updateOpts := routers.UpdateOpts{
+		Routes: routes,
+	}
+
+	router, err := routers.Update(networkClient, routerID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Router
+
+	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+	err := routers.Delete(networkClient, routerID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Add an Interface to a Router
+
+	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+	intOpts := routers.AddInterfaceOpts{
+		SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1",
+	}
+
+	intf, err := routers.AddInterface(networkClient, routerID, intOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove an Interface from a Router
+
+	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+	intOpts := routers.RemoveInterfaceOpts{
+		SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1",
+	}
+
+	intf, err := routers.RemoveInterface(networkClient, routerID, intOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package routers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
new file mode 100644
index 00000000000..6799d200b7a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
@@ -0,0 +1,223 @@
+package routers
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the router attributes you want to see returned. SortKey allows you to
+// sort by a particular network attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID           string `q:"id"`
+	Name         string `q:"name"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	Distributed  *bool  `q:"distributed"`
+	Status       string `q:"status"`
+	TenantID     string `q:"tenant_id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// routers. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those routers that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return RouterPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToRouterCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new router. There are
+// no required values.
+type CreateOpts struct {
+	Name         string       `json:"name,omitempty"`
+	AdminStateUp *bool        `json:"admin_state_up,omitempty"`
+	Distributed  *bool        `json:"distributed,omitempty"`
+	TenantID     string       `json:"tenant_id,omitempty"`
+	GatewayInfo  *GatewayInfo `json:"external_gateway_info,omitempty"`
+}
+
+// ToRouterCreateMap builds a create request body from CreateOpts.
+func (opts CreateOpts) ToRouterCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "router")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// logical router. When it is created, the router does not have an internal
+// interface - it is not associated to any subnet.
+//
+// You can optionally specify an external gateway for a router using the
+// GatewayInfo struct. The external gateway for the router must be plugged into
+// an external network (it is external if its `router:external' field is set to
+// true).
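+//
+// A short in-package sketch, not from the upstream docs (the network ID is
+// illustrative, and networkClient is assumed to be an authenticated Neutron
+// v2 client):
+//
+//	iTrue := true
+//	router, err := Create(networkClient, CreateOpts{
+//		Name:         "router_1",
+//		AdminStateUp: &iTrue,
+//		GatewayInfo:  &GatewayInfo{NetworkID: "uuid-of-external-network"},
+//	}).Extract()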
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRouterCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular router based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToRouterUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a router. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Distributed *bool `json:"distributed,omitempty"` + GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` + Routes []Route `json:"routes"` +} + +// ToRouterUpdateMap builds an update body based on UpdateOpts. +func (opts UpdateOpts) ToRouterUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "router") +} + +// Update allows routers to be updated. You can update the name, administrative +// state, and the external gateway. For more information about how to set the +// external gateway for a router, see Create. This operation does not enable +// the update of router interfaces. To do this, use the AddInterface and +// RemoveInterface functions. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRouterUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular router based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// AddInterfaceOptsBuilder allows extensions to add additional parameters to +// the AddInterface request. +type AddInterfaceOptsBuilder interface { + ToRouterAddInterfaceMap() (map[string]interface{}, error) +} + +// AddInterfaceOpts represents the options for adding an interface to a router. +type AddInterfaceOpts struct { + SubnetID string `json:"subnet_id,omitempty" xor:"PortID"` + PortID string `json:"port_id,omitempty" xor:"SubnetID"` +} + +// ToRouterAddInterfaceMap builds a request body from AddInterfaceOpts. +func (opts AddInterfaceOpts) ToRouterAddInterfaceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// AddInterface attaches a subnet to an internal router interface. You must +// specify either a SubnetID or PortID in the request body. If you specify both, +// the operation will fail and an error will be returned. +// +// If you specify a SubnetID, the gateway IP address for that particular subnet +// is used to create the router interface. Alternatively, if you specify a +// PortID, the IP address associated with the port is used to create the router +// interface. +// +// If you reference a port that is associated with multiple IP addresses, or +// if the port is associated with zero IP addresses, the operation will fail and +// a 400 Bad Request error will be returned. +// +// If you reference a port already in use, the operation will fail and a 409 +// Conflict error will be returned. 
+// +// The PortID that is returned after using Extract() on the result of this +// operation can either be the same PortID passed in or, on the other hand, the +// identifier of a new port created by this operation. After the operation +// completes, the device ID of the port is set to the router ID, and the +// device owner attribute is set to `network:router_interface'. +func AddInterface(c *gophercloud.ServiceClient, id string, opts AddInterfaceOptsBuilder) (r InterfaceResult) { + b, err := opts.ToRouterAddInterfaceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(addInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveInterfaceOptsBuilder allows extensions to add additional parameters to +// the RemoveInterface request. +type RemoveInterfaceOptsBuilder interface { + ToRouterRemoveInterfaceMap() (map[string]interface{}, error) +} + +// RemoveInterfaceOpts represents options for removing an interface from +// a router. +type RemoveInterfaceOpts struct { + SubnetID string `json:"subnet_id,omitempty" or:"PortID"` + PortID string `json:"port_id,omitempty" or:"SubnetID"` +} + +// ToRouterRemoveInterfaceMap builds a request body based on +// RemoveInterfaceOpts. +func (opts RemoveInterfaceOpts) ToRouterRemoveInterfaceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// RemoveInterface removes an internal router interface, which detaches a +// subnet from the router. You must specify either a SubnetID or PortID, since +// these values are used to identify the router interface to remove. +// +// Unlike AddInterface, you can also specify both a SubnetID and PortID. If you +// choose to specify both, the subnet ID must correspond to the subnet ID of +// the first IP address on the port specified by the port ID. Otherwise, the +// operation will fail and return a 409 Conflict error. +// +// If the router, subnet or port which are referenced do not exist or are not +// visible to you, the operation will fail and a 404 Not Found error will be +// returned. After this operation completes, the port connecting the router +// with the subnet is removed from the subnet for the network. +func RemoveInterface(c *gophercloud.ServiceClient, id string, opts RemoveInterfaceOptsBuilder) (r InterfaceResult) { + b, err := opts.ToRouterRemoveInterfaceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(removeInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go new file mode 100644 index 00000000000..e7c27dc332e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go @@ -0,0 +1,167 @@ +package routers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// GatewayInfo represents the information of an external gateway for any +// particular network router. +type GatewayInfo struct { + NetworkID string `json:"network_id"` + ExternalFixedIPs []ExternalFixedIP `json:"external_fixed_ips,omitempty"` +} + +// ExternalFixedIP is the IP address and subnet ID of the external gateway of a +// router. 
+type ExternalFixedIP struct {
+	IPAddress string `json:"ip_address"`
+	SubnetID  string `json:"subnet_id"`
+}
+
+// Route is a possible route in a router.
+type Route struct {
+	NextHop         string `json:"nexthop"`
+	DestinationCIDR string `json:"destination"`
+}
+
+// Router represents a Neutron router. A router is a logical entity that
+// forwards packets across internal subnets and NATs (network address
+// translation) them on external networks through an appropriate gateway.
+//
+// A router has an interface for each subnet with which it is associated. By
+// default, the IP address of such interface is the subnet's gateway IP. Also,
+// whenever a router is associated with a subnet, a port for that router
+// interface is added to the subnet's network.
+type Router struct {
+	// Status indicates whether or not a router is currently operational.
+	Status string `json:"status"`
+
+	// GatewayInfo provides information on the external gateway for the router.
+	GatewayInfo GatewayInfo `json:"external_gateway_info"`
+
+	// AdminStateUp is the administrative state of the router.
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Distributed indicates whether the router is distributed or not.
+	Distributed bool `json:"distributed"`
+
+	// Name is the human readable name for the router. It does not have to be
+	// unique.
+	Name string `json:"name"`
+
+	// ID is the unique identifier for the router.
+	ID string `json:"id"`
+
+	// TenantID is the owner of the router. Only admin users can specify a tenant
+	// identifier other than its own.
+	TenantID string `json:"tenant_id"`
+
+	// Routes are a collection of static routes that the router will host.
+	Routes []Route `json:"routes"`
+}
+
+// RouterPage is the page returned by a pager when traversing over a
+// collection of routers.
+type RouterPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of routers has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r RouterPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"routers_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a RouterPage struct is empty.
+func (r RouterPage) IsEmpty() (bool, error) {
+	is, err := ExtractRouters(r)
+	return len(is) == 0, err
+}
+
+// ExtractRouters accepts a Page struct, specifically a RouterPage struct,
+// and extracts the elements into a slice of Router structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractRouters(r pagination.Page) ([]Router, error) {
+	var s struct {
+		Routers []Router `json:"routers"`
+	}
+	err := (r.(RouterPage)).ExtractInto(&s)
+	return s.Routers, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a router.
+func (r commonResult) Extract() (*Router, error) {
+	var s struct {
+		Router *Router `json:"router"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Router, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Router.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Router.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Router.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// InterfaceInfo represents information about a particular router interface. As
+// mentioned above, in order for a router to forward to a subnet, it needs an
+// interface.
+type InterfaceInfo struct {
+	// SubnetID is the ID of the subnet which this interface is associated with.
+	SubnetID string `json:"subnet_id"`
+
+	// PortID is the ID of the port that is a part of the subnet.
+	PortID string `json:"port_id"`
+
+	// ID is the UUID of the interface.
+	ID string `json:"id"`
+
+	// TenantID is the owner of the interface.
+	TenantID string `json:"tenant_id"`
+}
+
+// InterfaceResult represents the result of interface operations, such as
+// AddInterface() and RemoveInterface(). Call its Extract method to interpret
+// the result as an InterfaceInfo.
+type InterfaceResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an information struct.
+func (r InterfaceResult) Extract() (*InterfaceInfo, error) {
+	var s InterfaceInfo
+	err := r.ExtractInto(&s)
+	return &s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
new file mode 100644
index 00000000000..f9e9da32117
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
@@ -0,0 +1,21 @@
+package routers
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "routers"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func addInterfaceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "add_router_interface")
+}
+
+func removeInterfaceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "remove_router_interface")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go
new file mode 100644
index 00000000000..bad3324b5ed
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go
@@ -0,0 +1,59 @@
+/*
+Package members provides information and interaction with Members of the
+Load Balancer as a Service extension for the OpenStack Networking service.
+
+Example to List Members
+
+	listOpts := members.ListOpts{
+		ProtocolPort: 80,
+	}
+
+	allPages, err := members.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMembers, err := members.ExtractMembers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, member := range allMembers {
+		fmt.Printf("%+v\n", member)
+	}
+
+Example to Create a Member
+
+	createOpts := members.CreateOpts{
+		Address:      "192.168.2.14",
+		ProtocolPort: 80,
+		PoolID:       "0b266a12-0fdf-4434-bd11-649d84e54bd5",
+	}
+
+	member, err := members.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Member
+
+	memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf"
+
+	updateOpts := members.UpdateOpts{
+		AdminStateUp: gophercloud.Disabled,
+	}
+
+	member, err := members.Update(networkClient, memberID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Member
+
+	memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf"
+	err := members.Delete(networkClient, memberID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package members
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
new file mode 100644
index 00000000000..1a312884440
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
@@ -0,0 +1,124 @@
+package members
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the member attributes you want to see returned. SortKey allows you to
+// sort by a particular network attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	Weight       int    `q:"weight"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	TenantID     string `q:"tenant_id"`
+	PoolID       string `q:"pool_id"`
+	Address      string `q:"address"`
+	ProtocolPort int    `q:"protocol_port"`
+	ID           string `q:"id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// members. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those members that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return MemberPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToLBMemberCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new pool member.
+type CreateOpts struct {
+	// Address is the IP address of the member.
+	Address string `json:"address" required:"true"`
+
+	// ProtocolPort is the port on which the application is hosted.
+	ProtocolPort int `json:"protocol_port" required:"true"`
+
+	// PoolID is the pool to which this member will belong.
+	PoolID string `json:"pool_id" required:"true"`
+
+	// TenantID is only required if the caller has an admin role and wants
+	// to create a member for another tenant.
+	TenantID string `json:"tenant_id,omitempty"`
+}
+
+// ToLBMemberCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToLBMemberCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// load balancer pool member.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToLBMemberCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular pool member based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToLBMemberUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a pool member.
+type UpdateOpts struct {
+	// The administrative state of the member, which is up (true) or down (false).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToLBMemberUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToLBMemberUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// Update allows members to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToLBMemberUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// Delete will permanently delete a particular member based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go
new file mode 100644
index 00000000000..804dbe8445f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go
@@ -0,0 +1,109 @@
+package members
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Member represents the application running on a backend server.
+type Member struct {
+	// Status is the status of the member. Indicates whether the member
+	// is operational.
+	Status string
+
+	// Weight is the weight of the member.
+	Weight int
+
+	// AdminStateUp is the administrative state of the member, which is up
+	// (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// TenantID is the owner of the member.
+	TenantID string `json:"tenant_id"`
+
+	// PoolID is the pool to which the member belongs.
+	PoolID string `json:"pool_id"`
+
+	// Address is the IP address of the member.
+	Address string
+
+	// ProtocolPort is the port on which the application is hosted.
+	ProtocolPort int `json:"protocol_port"`
+
+	// ID is the unique ID for the member.
+	ID string
+}
+
+// MemberPage is the page returned by a pager when traversing over a
+// collection of pool members.
+type MemberPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of members has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r MemberPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"members_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a MemberPage struct is empty.
+func (r MemberPage) IsEmpty() (bool, error) {
+	is, err := ExtractMembers(r)
+	return len(is) == 0, err
+}
+
+// ExtractMembers accepts a Page struct, specifically a MemberPage struct,
+// and extracts the elements into a slice of Member structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractMembers(r pagination.Page) ([]Member, error) {
+	var s struct {
+		Members []Member `json:"members"`
+	}
+	err := (r.(MemberPage)).ExtractInto(&s)
+	return s.Members, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a member.
+func (r commonResult) Extract() (*Member, error) {
+	var s struct {
+		Member *Member `json:"member"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Member, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Member.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Member.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Member.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
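+//
+// A brief, non-authoritative sketch (memberID is assumed to reference an
+// existing member, and networkClient an authenticated Neutron v2 client):
+//
+//	if err := Delete(networkClient, memberID).ExtractErr(); err != nil {
+//		// the delete did not go through, e.g. the member no longer exists
+//	}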
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
new file mode 100644
index 00000000000..e2248f81f45
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go
@@ -0,0 +1,16 @@
+package members
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "members"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go
new file mode 100644
index 00000000000..b5c0f29f054
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go
@@ -0,0 +1,63 @@
+/*
+Package monitors provides information and interaction with the Monitors
+of the Load Balancer as a Service extension for the OpenStack Networking
+Service.
+
+Example to List Monitors
+
+	listOpts := monitors.ListOpts{
+		Type: "HTTP",
+	}
+
+	allPages, err := monitors.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMonitors, err := monitors.ExtractMonitors(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, monitor := range allMonitors {
+		fmt.Printf("%+v\n", monitor)
+	}
+
+Example to Create a Monitor
+
+	createOpts := monitors.CreateOpts{
+		Type:          "HTTP",
+		Delay:         20,
+		Timeout:       20,
+		MaxRetries:    5,
+		URLPath:       "/check",
+		ExpectedCodes: "200-299",
+	}
+
+	monitor, err := monitors.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Monitor
+
+	monitorID := "681aed03-aadb-43ae-aead-b9016375650a"
+
+	updateOpts := monitors.UpdateOpts{
+		Timeout: 30,
+	}
+
+	monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Monitor
+
+	monitorID := "681aed03-aadb-43ae-aead-b9016375650a"
+	err := monitors.Delete(networkClient, monitorID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package monitors
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
new file mode 100644
index 00000000000..9ed0c769c90
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go
@@ -0,0 +1,227 @@
+package monitors
+
+import (
+	"fmt"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the monitor attributes you want to see returned. SortKey allows you to
+// sort by a particular network attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
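+//
+// For example, to list only HTTP monitors owned by one tenant (a sketch;
+// tenantID is an assumed, caller-supplied value):
+//
+//	pager := List(networkClient, ListOpts{Type: "HTTP", TenantID: tenantID})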
+type ListOpts struct {
+	ID            string `q:"id"`
+	TenantID      string `q:"tenant_id"`
+	Type          string `q:"type"`
+	Delay         int    `q:"delay"`
+	Timeout       int    `q:"timeout"`
+	MaxRetries    int    `q:"max_retries"`
+	HTTPMethod    string `q:"http_method"`
+	URLPath       string `q:"url_path"`
+	ExpectedCodes string `q:"expected_codes"`
+	AdminStateUp  *bool  `q:"admin_state_up"`
+	Status        string `q:"status"`
+	Limit         int    `q:"limit"`
+	Marker        string `q:"marker"`
+	SortKey       string `q:"sort_key"`
+	SortDir       string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// monitors. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those monitors that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return MonitorPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// MonitorType is the type for all the types of LB monitors.
+type MonitorType string
+
+// Constants that represent approved monitoring types.
+const (
+	TypePING  MonitorType = "PING"
+	TypeTCP   MonitorType = "TCP"
+	TypeHTTP  MonitorType = "HTTP"
+	TypeHTTPS MonitorType = "HTTPS"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToLBMonitorCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new health monitor.
+type CreateOpts struct {
+	// Type is the type of probe, which is PING, TCP, HTTP, or HTTPS,
+	// that is sent by the load balancer to verify the member state.
+	Type MonitorType `json:"type" required:"true"`
+
+	// Delay is the time, in seconds, between sending probes to members.
+	Delay int `json:"delay" required:"true"`
+
+	// Timeout is the maximum number of seconds for a monitor to wait for a ping
+	// reply before it times out. The value must be less than the delay value.
+	Timeout int `json:"timeout" required:"true"`
+
+	// MaxRetries is the number of permissible ping failures before changing the
+	// member's status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int `json:"max_retries" required:"true"`
+
+	// URLPath is the URI path that will be accessed if monitor type
+	// is HTTP or HTTPS. Required for HTTP(S) types.
+	URLPath string `json:"url_path,omitempty"`
+
+	// HTTPMethod is the HTTP method used for requests by the monitor. If this
+	// attribute is not specified, it defaults to "GET". Required for HTTP(S)
+	// types.
+	HTTPMethod string `json:"http_method,omitempty"`
+
+	// ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor.
+	// You can either specify a single status like "200", or a range like
+	// "200-202". Required for HTTP(S) types.
+	ExpectedCodes string `json:"expected_codes,omitempty"`
+
+	// TenantID is only required if the caller has an admin role and wants
+	// to create a monitor for another tenant.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// AdminStateUp denotes whether the monitor is administratively up or down.
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToLBMonitorCreateMap builds a request body from CreateOpts.
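+// It also validates the options first; as a sketch of that behavior, the
+// call below fails because HTTP monitors require URLPath and ExpectedCodes:
+//
+//	_, err := CreateOpts{
+//		Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
+//	}.ToLBMonitorCreateMap()
+//	// err is non-nil: URLPath and ExpectedCodes are missing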
+func (opts CreateOpts) ToLBMonitorCreateMap() (map[string]interface{}, error) {
+	if opts.Type == TypeHTTP || opts.Type == TypeHTTPS {
+		if opts.URLPath == "" {
+			err := gophercloud.ErrMissingInput{}
+			err.Argument = "monitors.CreateOpts.URLPath"
+			return nil, err
+		}
+		if opts.ExpectedCodes == "" {
+			err := gophercloud.ErrMissingInput{}
+			err.Argument = "monitors.CreateOpts.ExpectedCodes"
+			return nil, err
+		}
+	}
+	if opts.Delay < opts.Timeout {
+		err := gophercloud.ErrInvalidInput{}
+		err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout"
+		err.Info = "Delay must be greater than or equal to timeout"
+		return nil, err
+	}
+	return gophercloud.BuildRequestBody(opts, "health_monitor")
+}
+
+// Create is an operation which provisions a new health monitor. There are
+// different types of monitor you can provision: PING, TCP or HTTP(S). Below
+// are examples of how to create each one.
+//
+// Here is an example config struct to use when creating a PING or TCP monitor:
+//
+// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3}
+// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3}
+//
+// Here is an example config struct to use when creating an HTTP(S) monitor:
+//
+// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
+// HTTPMethod: "HEAD", ExpectedCodes: "200"}
+//
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToLBMonitorCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular health monitor based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToLBMonitorUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains all the values needed to update an existing monitor.
+// Attributes not listed here but that appear in CreateOpts are immutable and
+// cannot be updated.
+type UpdateOpts struct {
+	// Delay is the time, in seconds, between sending probes to members.
+	Delay int `json:"delay,omitempty"`
+
+	// Timeout is the maximum number of seconds for a monitor to wait for a ping
+	// reply before it times out. The value must be less than the delay value.
+	Timeout int `json:"timeout,omitempty"`
+
+	// MaxRetries is the number of permissible ping failures before changing the
+	// member's status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int `json:"max_retries,omitempty"`
+
+	// URLPath is the URI path that will be accessed if monitor type
+	// is HTTP or HTTPS.
+	URLPath string `json:"url_path,omitempty"`
+
+	// HTTPMethod is the HTTP method used for requests by the monitor. If this
+	// attribute is not specified, it defaults to "GET".
+	HTTPMethod string `json:"http_method,omitempty"`
+
+	// ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor.
+	// You can either specify a single status like "200", or a range like
+	// "200-202".
+	ExpectedCodes string `json:"expected_codes,omitempty"`
+
+	// AdminStateUp denotes whether the monitor is administratively up or down.
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToLBMonitorUpdateMap builds a request body from UpdateOpts.
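+// As a sketch of the validation it performs, the options below are rejected
+// because Delay is smaller than Timeout:
+//
+//	_, err := UpdateOpts{Delay: 10, Timeout: 30}.ToLBMonitorUpdateMap()
+//	// err is non-nil: Delay must be greater than or equal to Timeout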
+func (opts UpdateOpts) ToLBMonitorUpdateMap() (map[string]interface{}, error) { + if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout { + err := gophercloud.ErrInvalidInput{} + err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" + err.Value = fmt.Sprintf("%d/%d", opts.Delay, opts.Timeout) + err.Info = "Delay must be greater than or equal to timeout" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "health_monitor") +} + +// Update is an operation which modifies the attributes of the specified +// monitor. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBMonitorUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular monitor based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go new file mode 100644 index 00000000000..cc99f7cced7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go @@ -0,0 +1,141 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // ID is the unique ID for the Monitor. + ID string + + // Name is the monitor name. Does not have to be unique. + Name string + + // TenantID is the owner of the Monitor. + TenantID string `json:"tenant_id"` + + // Type is the type of probe sent by the load balancer to verify the member + // state, which is PING, TCP, HTTP, or HTTPS. + Type string + + // Delay is the time, in seconds, between sending probes to members. + Delay int + + // Timeout is the maximum number of seconds for a monitor to wait for a + // connection to be established before it times out. This value must be less + // than the delay value. + Timeout int + + // MaxRetries is the number of allowed connection failures before changing the + // status of the member to INACTIVE. A valid value is from 1 to 10. + MaxRetries int `json:"max_retries"` + + // HTTPMethod is the HTTP method that the monitor uses for requests. 
+	HTTPMethod string `json:"http_method"`
+
+	// URLPath is the HTTP path of the request sent by the monitor to test the
+	// health of a member. Must be a string beginning with a forward slash (/).
+	URLPath string `json:"url_path"`
+
+	// ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor.
+	ExpectedCodes string `json:"expected_codes"`
+
+	// AdminStateUp is the administrative state of the health monitor, which is up
+	// (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Status is the status of the health monitor. Indicates whether the health
+	// monitor is operational.
+	Status string
+}
+
+// MonitorPage is the page returned by a pager when traversing over a
+// collection of health monitors.
+type MonitorPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of monitors has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r MonitorPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"health_monitors_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a MonitorPage struct is empty.
+func (r MonitorPage) IsEmpty() (bool, error) {
+	is, err := ExtractMonitors(r)
+	return len(is) == 0, err
+}
+
+// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct,
+// and extracts the elements into a slice of Monitor structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractMonitors(r pagination.Page) ([]Monitor, error) {
+	var s struct {
+		Monitors []Monitor `json:"health_monitors"`
+	}
+	err := (r.(MonitorPage)).ExtractInto(&s)
+	return s.Monitors, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a monitor.
+func (r commonResult) Extract() (*Monitor, error) {
+	var s struct {
+		Monitor *Monitor `json:"health_monitor"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Monitor, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Monitor.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Monitor.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Monitor.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
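+//
+// A short sketch (monitorID is assumed to be known; a failed delete surfaces
+// as a non-nil error, e.g. a 404 for an unknown ID):
+//
+//	res := Delete(networkClient, monitorID)
+//	if err := res.ExtractErr(); err != nil {
+//		// handle the failure
+//	}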
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
new file mode 100644
index 00000000000..e9d90fcc569
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go
@@ -0,0 +1,16 @@
+package monitors
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "health_monitors"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go
new file mode 100644
index 00000000000..25c4204dc93
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go
@@ -0,0 +1,81 @@
+/*
+Package pools provides information and interaction with the Pools of the
+Load Balancing as a Service extension for the OpenStack Networking service.
+
+Example to List Pools
+
+	listOpts := pools.ListOpts{
+		SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d",
+	}
+
+	allPages, err := pools.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPools, err := pools.ExtractPools(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pool := range allPools {
+		fmt.Printf("%+v\n", pool)
+	}
+
+Example to Create a Pool
+
+	createOpts := pools.CreateOpts{
+		LBMethod: pools.LBMethodRoundRobin,
+		Protocol: "HTTP",
+		Name:     "Example pool",
+		SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9",
+		Provider: "haproxy",
+	}
+
+	pool, err := pools.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Pool
+
+	poolID := "166db5e6-c72a-4d77-8776-3573e27ae271"
+
+	updateOpts := pools.UpdateOpts{
+		LBMethod: pools.LBMethodLeastConnections,
+	}
+
+	pool, err := pools.Update(networkClient, poolID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Pool
+
+	poolID := "166db5e6-c72a-4d77-8776-3573e27ae271"
+	err := pools.Delete(networkClient, poolID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Associate a Monitor to a Pool
+
+	poolID := "166db5e6-c72a-4d77-8776-3573e27ae271"
+	monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b"
+
+	pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Disassociate a Monitor from a Pool
+
+	poolID := "166db5e6-c72a-4d77-8776-3573e27ae271"
+	monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b"
+
+	pool, err := pools.DisassociateMonitor(networkClient, poolID, monitorID).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package pools
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
new file mode 100644
index 00000000000..b3593548d37
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go
@@ -0,0 +1,175 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the pool attributes you want to see returned. SortKey allows you to
+// sort by a particular network attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	LBMethod     string `q:"lb_method"`
+	Protocol     string `q:"protocol"`
+	SubnetID     string `q:"subnet_id"`
+	TenantID     string `q:"tenant_id"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	Name         string `q:"name"`
+	ID           string `q:"id"`
+	VIPID        string `q:"vip_id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// pools. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those pools that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return PoolPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// LBMethod is a type used for possible load balancing methods.
+type LBMethod string
+
+// LBProtocol is a type used for possible load balancing protocols.
+type LBProtocol string
+
+// Supported attributes for create/update operations.
+const (
+	LBMethodRoundRobin       LBMethod = "ROUND_ROBIN"
+	LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS"
+
+	ProtocolTCP   LBProtocol = "TCP"
+	ProtocolHTTP  LBProtocol = "HTTP"
+	ProtocolHTTPS LBProtocol = "HTTPS"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToLBPoolCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new pool.
+type CreateOpts struct {
+	// Name of the pool.
+	Name string `json:"name" required:"true"`
+
+	// Protocol used by the pool members; you can use either
+	// ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS.
+	Protocol LBProtocol `json:"protocol" required:"true"`
+
+	// TenantID is only required if the caller has an admin role and wants
+	// to create a pool for another tenant.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// SubnetID is the network on which the members of the pool will be located.
+	// Only members that are on this network can be added to the pool.
+	SubnetID string `json:"subnet_id,omitempty"`
+
+	// LBMethod is the algorithm used to distribute load between the members of
+	// the pool. The current specification supports LBMethodRoundRobin and
+	// LBMethodLeastConnections as valid values for this attribute.
+	LBMethod LBMethod `json:"lb_method" required:"true"`
+
+	// Provider of the pool.
+	Provider string `json:"provider,omitempty"`
+}
+
+// ToLBPoolCreateMap builds a request body based on CreateOpts.
+func (opts CreateOpts) ToLBPoolCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "pool")
+}
+
+// Create accepts a CreateOptsBuilder and uses the values to create a new
+// load balancer pool.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToLBPoolCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular pool based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToLBPoolUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a pool.
+type UpdateOpts struct {
+	// Name of the pool.
+	Name string `json:"name,omitempty"`
+
+	// LBMethod is the algorithm used to distribute load between the members of
+	// the pool. The current specification supports LBMethodRoundRobin and
+	// LBMethodLeastConnections as valid values for this attribute.
+	LBMethod LBMethod `json:"lb_method,omitempty"`
+}
+
+// ToLBPoolUpdateMap builds a request body based on UpdateOpts.
+func (opts UpdateOpts) ToLBPoolUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "pool")
+}
+
+// Update allows pools to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToLBPoolUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular pool based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// AssociateMonitor will associate a health monitor with a particular pool.
+// Once associated, the health monitor will start monitoring the members of the
+// pool and will deactivate these members if they are deemed unhealthy. A
+// member can be deactivated (status set to INACTIVE) if any of the health
+// monitors finds it unhealthy.
+func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) {
+	b := map[string]interface{}{"health_monitor": map[string]string{"id": monitorID}}
+	_, r.Err = c.Post(associateURL(c, poolID), b, &r.Body, nil)
+	return
+}
+
+// DisassociateMonitor will disassociate a health monitor from a particular
+// pool. When disassociation is successful, the health monitor will no longer
+// check for the health of the members of the pool.
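+//
+// A minimal sketch of the associate/disassociate round trip (poolID and
+// monitorID are assumed to reference existing resources):
+//
+//	pool, err := AssociateMonitor(networkClient, poolID, monitorID).Extract()
+//	// ... later, stop health checks against the pool's members ...
+//	pool, err = DisassociateMonitor(networkClient, poolID, monitorID).Extract()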
+func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) {
+	_, r.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go
new file mode 100644
index 00000000000..c2bae82d56e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go
@@ -0,0 +1,137 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Pool represents a logical set of devices, such as web servers, that you
+// group together to receive and process traffic. The load balancing function
+// chooses a member of the pool according to the configured load balancing
+// method to handle the new requests or connections received on the VIP address.
+// There is only one pool per virtual IP.
+type Pool struct {
+	// Status of the pool. Indicates whether the pool is operational.
+	Status string
+
+	// LBMethod is the load-balancer algorithm, which is round-robin,
+	// least-connections, and so on. This value, which must be supported, is
+	// dependent on the provider.
+	LBMethod string `json:"lb_method"`
+
+	// Protocol of the pool, which is TCP, HTTP, or HTTPS.
+	Protocol string
+
+	// Description for the pool.
+	Description string
+
+	// MonitorIDs are the IDs of associated monitors which check the health of
+	// the pool members.
+	MonitorIDs []string `json:"health_monitors"`
+
+	// SubnetID is the network on which the members of the pool will be located.
+	// Only members that are on this network can be added to the pool.
+	SubnetID string `json:"subnet_id"`
+
+	// TenantID is the owner of the pool.
+	TenantID string `json:"tenant_id"`
+
+	// AdminStateUp is the administrative state of the pool, which is up
+	// (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Name of the pool.
+	Name string
+
+	// MemberIDs is the list of member IDs that belong to the pool.
+	MemberIDs []string `json:"members"`
+
+	// ID is the unique ID for the pool.
+	ID string
+
+	// VIPID is the ID of the virtual IP associated with this pool.
+	VIPID string `json:"vip_id"`
+
+	// The provider.
+	Provider string
+}
+
+// PoolPage is the page returned by a pager when traversing over a
+// collection of pools.
+type PoolPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of pools has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r PoolPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"pools_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PoolPage struct is empty.
+func (r PoolPage) IsEmpty() (bool, error) {
+	is, err := ExtractPools(r)
+	return len(is) == 0, err
+}
+
+// ExtractPools accepts a Page struct, specifically a PoolPage struct,
+// and extracts the elements into a slice of Pool structs. In other words,
+// a generic collection is mapped into a relevant slice.
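+//
+// A hedged sketch of draining the pager page by page (networkClient is an
+// assumed, authenticated *gophercloud.ServiceClient; fmt is imported by the
+// caller):
+//
+//	err := List(networkClient, ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+//		ps, err := ExtractPools(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, p := range ps {
+//			fmt.Printf("%+v\n", p)
+//		}
+//		return true, nil
+//	})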
+func ExtractPools(r pagination.Page) ([]Pool, error) {
+	var s struct {
+		Pools []Pool `json:"pools"`
+	}
+	err := (r.(PoolPage)).ExtractInto(&s)
+	return s.Pools, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a pool.
+func (r commonResult) Extract() (*Pool, error) {
+	var s struct {
+		Pool *Pool `json:"pool"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Pool, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Pool.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Pool.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Pool.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// AssociateResult represents the result of an association operation. Call its
+// Extract method to interpret it as a Pool.
+type AssociateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
new file mode 100644
index 00000000000..fe3601bbec9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go
@@ -0,0 +1,25 @@
+package pools
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "pools"
+	monitorPath  = "health_monitors"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
+
+func associateURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id, monitorPath)
+}
+
+func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string {
+	return c.ServiceURL(rootPath, resourcePath, poolID, monitorPath, monitorID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go
new file mode 100644
index 00000000000..7fd861044b6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go
@@ -0,0 +1,65 @@
+/*
+Package vips provides information and interaction with the Virtual IPs of the
+Load Balancing as a Service extension for the OpenStack Networking service.
+
+Example to List Virtual IPs
+
+	listOpts := vips.ListOpts{
+		SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d",
+	}
+
+	allPages, err := vips.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allVIPs, err := vips.ExtractVIPs(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, vip := range allVIPs {
+		fmt.Printf("%+v\n", vip)
+	}
+
+Example to Create a Virtual IP
+
+	createOpts := vips.CreateOpts{
+		Protocol:     "HTTP",
+		Name:         "NewVip",
+		AdminStateUp: gophercloud.Enabled,
+		SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
+		PoolID:       "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
+		ProtocolPort: 80,
+		Persistence:  &vips.SessionPersistence{Type: "SOURCE_IP"},
+	}
+
+	vip, err := vips.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Virtual IP
+
+	vipID := "93f1bad4-0423-40a8-afac-3fc541839912"
+
+	i1000 := 1000
+	updateOpts := vips.UpdateOpts{
+		ConnLimit:   &i1000,
+		Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"},
+	}
+
+	vip, err := vips.Update(networkClient, vipID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Virtual IP
+
+	vipID := "93f1bad4-0423-40a8-afac-3fc541839912"
+	err := vips.Delete(networkClient, vipID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package vips
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
new file mode 100644
index 00000000000..53b81bfdb92
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go
@@ -0,0 +1,180 @@
+package vips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the virtual IP attributes you want to see returned. SortKey allows you to
+// sort by a particular virtual IP attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID              string `q:"id"`
+	Name            string `q:"name"`
+	AdminStateUp    *bool  `q:"admin_state_up"`
+	Status          string `q:"status"`
+	TenantID        string `q:"tenant_id"`
+	SubnetID        string `q:"subnet_id"`
+	Address         string `q:"address"`
+	PortID          string `q:"port_id"`
+	Protocol        string `q:"protocol"`
+	ProtocolPort    int    `q:"protocol_port"`
+	ConnectionLimit int    `q:"connection_limit"`
+	Limit           int    `q:"limit"`
+	Marker          string `q:"marker"`
+	SortKey         string `q:"sort_key"`
+	SortDir         string `q:"sort_dir"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// Virtual IPs. It accepts a ListOpts struct, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those virtual IPs that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+	q, err := gophercloud.BuildQueryString(&opts)
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+	u := rootURL(c) + q.String()
+	return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+		return VIPPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToVIPCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new virtual IP.
+type CreateOpts struct {
+	// Name is the human-readable name for the VIP. Does not have to be unique.
+	Name string `json:"name" required:"true"`
+
+	// SubnetID is the network on which to allocate the VIP's address. A tenant
+	// can only create VIPs on networks authorized by policy (e.g. networks that
+	// belong to them or networks that are shared).
+	SubnetID string `json:"subnet_id" required:"true"`
+
+	// Protocol - can either be TCP, HTTP or HTTPS.
+	Protocol string `json:"protocol" required:"true"`
+
+	// ProtocolPort is the port on which to listen for client traffic.
+	ProtocolPort int `json:"protocol_port" required:"true"`
+
+	// PoolID is the ID of the pool with which the VIP is associated.
+	PoolID string `json:"pool_id" required:"true"`
+
+	// TenantID is only required if the caller has an admin role and wants
+	// to create a pool for another tenant.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// Address is the IP address of the VIP.
+	Address string `json:"address,omitempty"`
+
+	// Description is the human-readable description for the VIP.
+	Description string `json:"description,omitempty"`
+
+	// Persistence is the type of session persistence to use.
+	// Omit this field to prevent session persistence.
+	Persistence *SessionPersistence `json:"session_persistence,omitempty"`
+
+	// ConnLimit is the maximum number of connections allowed for the VIP.
+	ConnLimit *int `json:"connection_limit,omitempty"`
+
+	// AdminStateUp is the administrative state of the VIP. A valid value is
+	// true (UP) or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToVIPCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToVIPCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vip")
+}
+
+// Create is an operation which provisions a new virtual IP based on the
+// configuration defined in the CreateOpts struct. Once the request is
+// validated and progress has started on the provisioning process, a
+// CreateResult will be returned.
+//
+// Please note that the PoolID should refer to a pool that is not already
+// associated with another vip. If the pool is already used by another vip,
+// then the operation will fail and a 409 Conflict error will be returned.
+//
+// Users with an admin role can create VIPs on behalf of other tenants by
+// specifying a TenantID attribute different than their own.
+func Create(c *gophercloud.ServiceClient, opts CreateOpts) (r CreateResult) {
+	b, err := opts.ToVIPCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular virtual IP based on its unique ID.
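+//
+// A hedged usage sketch (vipID is an assumed, pre-existing VIP ID and
+// networkClient an assumed, authenticated *gophercloud.ServiceClient):
+//
+//	vip, err := Get(networkClient, vipID).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%+v\n", vip)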
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToVIPUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains all the values needed to update an existing virtual IP.
+// Attributes not listed here but appear in CreateOpts are immutable and cannot
+// be updated.
+type UpdateOpts struct {
+	// Name is the human-readable name for the VIP. Does not have to be unique.
+	Name *string `json:"name,omitempty"`
+
+	// PoolID is the ID of the pool with which the VIP is associated.
+	PoolID *string `json:"pool_id,omitempty"`
+
+	// Description is the human-readable description for the VIP.
+	Description *string `json:"description,omitempty"`
+
+	// Persistence is the type of session persistence to use.
+	// Omit this field to prevent session persistence.
+	Persistence *SessionPersistence `json:"session_persistence,omitempty"`
+
+	// ConnLimit is the maximum number of connections allowed for the VIP.
+	ConnLimit *int `json:"connection_limit,omitempty"`
+
+	// AdminStateUp is the administrative state of the VIP. A valid value is
+	// true (UP) or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToVIPUpdateMap builds a request body based on UpdateOpts.
+func (opts UpdateOpts) ToVIPUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vip")
+}
+
+// Update is an operation which modifies the attributes of the specified VIP.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVIPUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete will permanently delete a particular virtual IP based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
new file mode 100644
index 00000000000..cb0994a7b25
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
@@ -0,0 +1,156 @@
+package vips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SessionPersistence represents the session persistence feature of the load
+// balancing service. It attempts to force connections or requests in the same
+// session to be processed by the same member as long as it is active. Three
+// types of persistence are supported:
+//
+// SOURCE_IP: With this mode, all connections originating from the same source
+//            IP address will be handled by the same member of the pool.
+// HTTP_COOKIE: With this persistence mode, the load balancing function will
+//              create a cookie on the first request from a client. Subsequent
+//              requests containing the same cookie value will be handled by
+//              the same member of the pool.
+// APP_COOKIE: With this persistence mode, the load balancing function will
+//             rely on a cookie established by the backend application. All
+//             requests carrying the same cookie value will be handled by the
+//             same member of the pool.
+type SessionPersistence struct {
+	// Type is the type of persistence mode.
+	Type string `json:"type"`
+
+	// CookieName is the name of cookie if persistence mode is set appropriately.
+	CookieName string `json:"cookie_name,omitempty"`
+}
+
+// VirtualIP is the primary load balancing configuration object that specifies
+// the virtual IP address and port on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+// This entity is sometimes known in LB products under the name of a "virtual
+// server", a "vserver" or a "listener".
+type VirtualIP struct {
+	// ID is the unique ID for the VIP.
+	ID string `json:"id"`
+
+	// TenantID is the owner of the VIP.
+	TenantID string `json:"tenant_id"`
+
+	// Name is the human-readable name for the VIP. Does not have to be unique.
+	Name string `json:"name"`
+
+	// Description is the human-readable description for the VIP.
+	Description string `json:"description"`
+
+	// SubnetID is the ID of the subnet on which to allocate the VIP address.
+	SubnetID string `json:"subnet_id"`
+
+	// Address is the IP address of the VIP.
+	Address string `json:"address"`
+
+	// Protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS.
+	Protocol string `json:"protocol"`
+
+	// ProtocolPort is the port on which to listen to client traffic that is
+	// associated with the VIP address. A valid value is from 0 to 65535.
+	ProtocolPort int `json:"protocol_port"`
+
+	// PoolID is the ID of the pool with which the VIP is associated.
+	PoolID string `json:"pool_id"`
+
+	// PortID is the ID of the port which belongs to the load balancer.
+	PortID string `json:"port_id"`
+
+	// Persistence indicates whether connections in the same session will be
+	// processed by the same pool member or not.
+	Persistence SessionPersistence `json:"session_persistence"`
+
+	// ConnLimit is the maximum number of connections allowed for the VIP.
+	// Default is -1, meaning no limit.
+	ConnLimit int `json:"connection_limit"`
+
+	// AdminStateUp is the administrative state of the VIP. A valid value is
+	// true (UP) or false (DOWN).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Status is the status of the VIP. Indicates whether the VIP is operational.
+	Status string `json:"status"`
+}
+
+// VIPPage is the page returned by a pager when traversing over a
+// collection of virtual IPs.
+type VIPPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of virtual IPs has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r VIPPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"vips_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a VIPPage struct is empty.
+func (r VIPPage) IsEmpty() (bool, error) {
+	is, err := ExtractVIPs(r)
+	return len(is) == 0, err
+}
+
+// ExtractVIPs accepts a Page struct, specifically a VIPPage struct,
+// and extracts the elements into a slice of VirtualIP structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractVIPs(r pagination.Page) ([]VirtualIP, error) {
+	var s struct {
+		VIPs []VirtualIP `json:"vips"`
+	}
+	err := (r.(VIPPage)).ExtractInto(&s)
+	return s.VIPs, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a VirtualIP.
+func (r commonResult) Extract() (*VirtualIP, error) {
+	var s struct {
+		VirtualIP *VirtualIP `json:"vip"`
+	}
+	err := r.ExtractInto(&s)
+	return s.VirtualIP, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a VirtualIP.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a VirtualIP.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a VirtualIP.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go
new file mode 100644
index 00000000000..584a1cf680c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go
@@ -0,0 +1,16 @@
+package vips
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lb"
+	resourcePath = "vips"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go
new file mode 100644
index 00000000000..108cdb03d8b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go
@@ -0,0 +1,63 @@
+/*
+Package listeners provides information and interaction with Listeners of the
+LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Listeners
+
+	listOpts := listeners.ListOpts{
+		LoadbalancerID: "ca430f80-1737-4712-8dc6-3f640d55594b",
+	}
+
+	allPages, err := listeners.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allListeners, err := listeners.ExtractListeners(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, listener := range allListeners {
+		fmt.Printf("%+v\n", listener)
+	}
+
+Example to Create a Listener
+
+	createOpts := listeners.CreateOpts{
+		Protocol:       "TCP",
+		Name:           "db",
+		LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab",
+		AdminStateUp:   gophercloud.Enabled,
+		DefaultPoolID:  "41efe233-7591-43c5-9cf7-923964759f9e",
+		ProtocolPort:   3306,
+	}
+
+	listener, err := listeners.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Listener
+
+	listenerID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	i1001 := 1001
+	updateOpts := listeners.UpdateOpts{
+		ConnLimit: &i1001,
+	}
+
+	listener, err := listeners.Update(networkClient, listenerID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Listener
+
+	listenerID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := listeners.Delete(networkClient, listenerID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package listeners
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go
new file mode 100644
index 00000000000..625748fd256
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go
@@ -0,0 +1,194 @@
+package listeners
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Protocol represents a listener protocol.
+type Protocol string
+
+// Supported attributes for create/update operations.
+const (
+	ProtocolTCP   Protocol = "TCP"
+	ProtocolHTTP  Protocol = "HTTP"
+	ProtocolHTTPS Protocol = "HTTPS"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToListenerListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the listener attributes you want to see returned. SortKey allows you to
+// sort by a particular listener attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID              string `q:"id"`
+	Name            string `q:"name"`
+	AdminStateUp    *bool  `q:"admin_state_up"`
+	TenantID        string `q:"tenant_id"`
+	LoadbalancerID  string `q:"loadbalancer_id"`
+	DefaultPoolID   string `q:"default_pool_id"`
+	Protocol        string `q:"protocol"`
+	ProtocolPort    int    `q:"protocol_port"`
+	ConnectionLimit int    `q:"connection_limit"`
+	Limit           int    `q:"limit"`
+	Marker          string `q:"marker"`
+	SortKey         string `q:"sort_key"`
+	SortDir         string `q:"sort_dir"`
+}
+
+// ToListenerListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToListenerListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// listeners. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those listeners that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToListenerListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return ListenerPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToListenerCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents options for creating a listener.
+type CreateOpts struct {
+	// The load balancer on which to provision this listener.
+	LoadbalancerID string `json:"loadbalancer_id" required:"true"`
+
+	// The protocol - can either be TCP, HTTP or HTTPS.
+	Protocol Protocol `json:"protocol" required:"true"`
+
+	// The port on which to listen for client traffic.
+	ProtocolPort int `json:"protocol_port" required:"true"`
+
+	// TenantID is only required if the caller has an admin role and wants
+	// to create a pool for another tenant.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// Human-readable name for the Listener. Does not have to be unique.
+	Name string `json:"name,omitempty"`
+
+	// The ID of the default pool with which the Listener is associated.
+	DefaultPoolID string `json:"default_pool_id,omitempty"`
+
+	// Human-readable description for the Listener.
+	Description string `json:"description,omitempty"`
+
+	// The maximum number of connections allowed for the Listener.
+	ConnLimit *int `json:"connection_limit,omitempty"`
+
+	// A reference to a Barbican container of TLS secrets.
+	DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"`
+
+	// A list of references to TLS secrets.
+	SniContainerRefs []string `json:"sni_container_refs,omitempty"`
+
+	// The administrative state of the Listener. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToListenerCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToListenerCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "listener")
+}
+
+// Create is an operation which provisions a new Listener based on the
+// configuration defined in the CreateOpts struct. Once the request is
+// validated and progress has started on the provisioning process, a
+// CreateResult will be returned.
+//
+// Users with an admin role can create Listeners on behalf of other tenants by
+// specifying a TenantID attribute different than their own.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToListenerCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular Listener based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToListenerUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options for updating a Listener.
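+//
+// A hedged sketch of an update call (listenerID is an assumed, existing ID;
+// networkClient an assumed, authenticated *gophercloud.ServiceClient):
+//
+//	updateOpts := UpdateOpts{Description: "listener for the db tier"}
+//	listener, err := Update(networkClient, listenerID, updateOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}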
+type UpdateOpts struct {
+	// Human-readable name for the Listener. Does not have to be unique.
+	Name string `json:"name,omitempty"`
+
+	// Human-readable description for the Listener.
+	Description string `json:"description,omitempty"`
+
+	// The maximum number of connections allowed for the Listener.
+	ConnLimit *int `json:"connection_limit,omitempty"`
+
+	// A reference to a Barbican container of TLS secrets.
+	DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"`
+
+	// A list of references to TLS secrets.
+	SniContainerRefs []string `json:"sni_container_refs,omitempty"`
+
+	// The administrative state of the Listener. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToListenerUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToListenerUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "listener")
+}
+
+// Update is an operation which modifies the attributes of the specified
+// Listener.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
+	b, err := opts.ToListenerUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete will permanently delete a particular Listener based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go
new file mode 100644
index 00000000000..e0c134ed51b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go
@@ -0,0 +1,131 @@
+package listeners
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// LoadBalancerID is the ID of a load balancer associated with a Listener.
+type LoadBalancerID struct {
+	ID string `json:"id"`
+}
+
+// Listener is the primary load balancing configuration object that specifies
+// the loadbalancer and port on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+type Listener struct {
+	// The unique ID for the Listener.
+	ID string `json:"id"`
+
+	// Owner of the Listener.
+	TenantID string `json:"tenant_id"`
+
+	// Human-readable name for the Listener. Does not have to be unique.
+	Name string `json:"name"`
+
+	// Human-readable description for the Listener.
+	Description string `json:"description"`
+
+	// The protocol to loadbalance. A valid value is TCP, HTTP, or HTTPS.
+	Protocol string `json:"protocol"`
+
+	// The port on which to listen to client traffic that is associated with the
+	// Loadbalancer. A valid value is from 0 to 65535.
+	ProtocolPort int `json:"protocol_port"`
+
+	// The UUID of the default pool. Must have compatible protocol with listener.
+	DefaultPoolID string `json:"default_pool_id"`
+
+	// A list of load balancer IDs.
+	Loadbalancers []LoadBalancerID `json:"loadbalancers"`
+
+	// The maximum number of connections allowed for the Loadbalancer.
+	// Default is -1, meaning no limit.
+	ConnLimit int `json:"connection_limit"`
+
+	// The list of references to TLS secrets.
+	SniContainerRefs []string `json:"sni_container_refs"`
+
+	// A reference to a Barbican container of TLS secrets.
+	DefaultTlsContainerRef string `json:"default_tls_container_ref"`
+
+	// The administrative state of the Listener. A valid value is true (UP) or false (DOWN).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Pools are the pools which are part of this listener.
+	Pools []pools.Pool `json:"pools"`
+}
+
+// ListenerPage is the page returned by a pager when traversing over a
+// collection of listeners.
+type ListenerPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of listeners has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r ListenerPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"listeners_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a ListenerPage struct is empty.
+func (r ListenerPage) IsEmpty() (bool, error) {
+	is, err := ExtractListeners(r)
+	return len(is) == 0, err
+}
+
+// ExtractListeners accepts a Page struct, specifically a ListenerPage struct,
+// and extracts the elements into a slice of Listener structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractListeners(r pagination.Page) ([]Listener, error) {
+	var s struct {
+		Listeners []Listener `json:"listeners"`
+	}
+	err := (r.(ListenerPage)).ExtractInto(&s)
+	return s.Listeners, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a listener.
+func (r commonResult) Extract() (*Listener, error) {
+	var s struct {
+		Listener *Listener `json:"listener"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Listener, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Listener.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Listener.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Listener.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go
new file mode 100644
index 00000000000..02fb1eb39ec
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go
@@ -0,0 +1,16 @@
+package listeners
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lbaas"
+	resourcePath = "listeners"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go
new file mode 100644
index 00000000000..eea43391a8c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go
@@ -0,0 +1,71 @@
+/*
+Package loadbalancers provides information and interaction with Load Balancers
+of the LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Load Balancers
+
+	listOpts := loadbalancers.ListOpts{
+		Provider: "haproxy",
+	}
+
+	allPages, err := loadbalancers.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, lb := range allLoadbalancers {
+		fmt.Printf("%+v\n", lb)
+	}
+
+Example to Create a Load Balancer
+
+	createOpts := loadbalancers.CreateOpts{
+		Name:         "db_lb",
+		AdminStateUp: gophercloud.Enabled,
+		VipSubnetID:  "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+		VipAddress:   "10.30.176.48",
+		Flavor:       "medium",
+		Provider:     "haproxy",
+	}
+
+	lb, err := loadbalancers.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Load Balancer
+
+	lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	updateOpts := loadbalancers.UpdateOpts{
+		Name: "new-name",
+	}
+
+	lb, err := loadbalancers.Update(networkClient, lbID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Load Balancer
+
+	lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := loadbalancers.Delete(networkClient, lbID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get the Status of a Load Balancer
+
+	lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	status, err := loadbalancers.GetStatuses(networkClient, lbID).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package loadbalancers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go
new file mode 100644
index 00000000000..839776dd28b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go
@@ -0,0 +1,177 @@
+package loadbalancers
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToLoadBalancerListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Loadbalancer attributes you want to see returned. SortKey allows you to
+// sort by a particular attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Description        string `q:"description"`
+	AdminStateUp       *bool  `q:"admin_state_up"`
+	TenantID           string `q:"tenant_id"`
+	ProvisioningStatus string `q:"provisioning_status"`
+	VipAddress         string `q:"vip_address"`
+	VipPortID          string `q:"vip_port_id"`
+	VipSubnetID        string `q:"vip_subnet_id"`
+	ID                 string `q:"id"`
+	OperatingStatus    string `q:"operating_status"`
+	Name               string `q:"name"`
+	Flavor             string `q:"flavor"`
+	Provider           string `q:"provider"`
+	Limit              int    `q:"limit"`
+	Marker             string `q:"marker"`
+	SortKey            string `q:"sort_key"`
+	SortDir            string `q:"sort_dir"`
+}
+
+// ToLoadBalancerListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToLoadBalancerListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// load balancers. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those load balancers that are owned by
+// the tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToLoadBalancerListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return LoadBalancerPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToLoadBalancerCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+	// Human-readable name for the Loadbalancer. Does not have to be unique.
+	Name string `json:"name,omitempty"`
+
+	// Human-readable description for the Loadbalancer.
+	Description string `json:"description,omitempty"`
+
+	// The network on which to allocate the Loadbalancer's address. A tenant can
+	// only create Loadbalancers on networks authorized by policy (e.g. networks
+	// that belong to them or networks that are shared).
+	VipSubnetID string `json:"vip_subnet_id" required:"true"`
+
+	// The UUID of the tenant who owns the Loadbalancer. Only administrative users
+	// can specify a tenant UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// The IP address of the Loadbalancer.
+	VipAddress string `json:"vip_address,omitempty"`
+
+	// The administrative state of the Loadbalancer. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+
+	// The UUID of a flavor.
+	Flavor string `json:"flavor,omitempty"`
+
+	// The name of the provider.
+	Provider string `json:"provider,omitempty"`
+}
+
+// ToLoadBalancerCreateMap builds a request body from CreateOpts.
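+//
+// A hedged sketch of the resulting body shape (the subnet ID is illustrative):
+//
+//	opts := CreateOpts{
+//		Name:        "db_lb",
+//		VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+//	}
+//	b, _ := opts.ToLoadBalancerCreateMap()
+//	// b wraps the JSON fields under a top-level "loadbalancer" key.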
+func (opts CreateOpts) ToLoadBalancerCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "loadbalancer")
+}
+
+// Create is an operation which provisions a new loadbalancer based on the
+// configuration defined in the CreateOpts struct. Once the request is
+// validated and progress has started on the provisioning process, a
+// CreateResult will be returned.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToLoadBalancerCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular Loadbalancer based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToLoadBalancerUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts is the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// Human-readable name for the Loadbalancer. Does not have to be unique.
+	Name string `json:"name,omitempty"`
+
+	// Human-readable description for the Loadbalancer.
+	Description string `json:"description,omitempty"`
+
+	// The administrative state of the Loadbalancer. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToLoadBalancerUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "loadbalancer")
+}
+
+// Update is an operation which modifies the attributes of the specified
+// LoadBalancer.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
+	b, err := opts.ToLoadBalancerUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete will permanently delete a particular LoadBalancer based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// GetStatuses will return the status of a particular LoadBalancer.
+func GetStatuses(c *gophercloud.ServiceClient, id string) (r GetStatusesResult) {
+	_, r.Err = c.Get(statusRootURL(c, id), &r.Body, nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go
new file mode 100644
index 00000000000..9f8f19d7c56
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go
@@ -0,0 +1,149 @@
+package loadbalancers
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// LoadBalancer is the primary load balancing configuration object that
+// specifies the virtual IP address on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+type LoadBalancer struct {
+	// Human-readable description for the Loadbalancer.
+	Description string `json:"description"`
+
+	// The administrative state of the Loadbalancer.
+	// A valid value is true (UP) or false (DOWN).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Owner of the LoadBalancer.
+	TenantID string `json:"tenant_id"`
+
+	// The provisioning status of the LoadBalancer.
+	// This value is ACTIVE, PENDING_CREATE or ERROR.
+	ProvisioningStatus string `json:"provisioning_status"`
+
+	// The IP address of the Loadbalancer.
+	VipAddress string `json:"vip_address"`
+
+	// The UUID of the port associated with the IP address.
+	VipPortID string `json:"vip_port_id"`
+
+	// The UUID of the subnet on which to allocate the virtual IP for the
+	// Loadbalancer address.
+	VipSubnetID string `json:"vip_subnet_id"`
+
+	// The unique ID for the LoadBalancer.
+	ID string `json:"id"`
+
+	// The operating status of the LoadBalancer. This value is ONLINE or OFFLINE.
+	OperatingStatus string `json:"operating_status"`
+
+	// Human-readable name for the LoadBalancer. Does not have to be unique.
+	Name string `json:"name"`
+
+	// The UUID of a flavor if set.
+	Flavor string `json:"flavor"`
+
+	// The name of the provider.
+	Provider string `json:"provider"`
+
+	// Listeners are the listeners related to this Loadbalancer.
+	Listeners []listeners.Listener `json:"listeners"`
+}
+
+// StatusTree represents the status of a loadbalancer.
+type StatusTree struct {
+	Loadbalancer *LoadBalancer `json:"loadbalancer"`
+}
+
+// LoadBalancerPage is the page returned by a pager when traversing over a
+// collection of load balancers.
+type LoadBalancerPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of load balancers has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r LoadBalancerPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"loadbalancers_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a LoadBalancerPage struct is empty.
+func (p LoadBalancerPage) IsEmpty() (bool, error) {
+	is, err := ExtractLoadBalancers(p)
+	return len(is) == 0, err
+}
+
+// ExtractLoadBalancers accepts a Page struct, specifically a LoadBalancerPage
+// struct, and extracts the elements into a slice of LoadBalancer structs. In
+// other words, a generic collection is mapped into a relevant slice.
+func ExtractLoadBalancers(r pagination.Page) ([]LoadBalancer, error) {
+	var s struct {
+		LoadBalancers []LoadBalancer `json:"loadbalancers"`
+	}
+	err := (r.(LoadBalancerPage)).ExtractInto(&s)
+	return s.LoadBalancers, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a loadbalancer.
+func (r commonResult) Extract() (*LoadBalancer, error) {
+	var s struct {
+		LoadBalancer *LoadBalancer `json:"loadbalancer"`
+	}
+	err := r.ExtractInto(&s)
+	return s.LoadBalancer, err
+}
+
+// GetStatusesResult represents the result of a GetStatuses operation.
+// Call its Extract method to interpret it as a StatusTree.
+type GetStatusesResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts the status of
+// a Loadbalancer.
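+//
+// A hedged usage sketch (lbID is an assumed, existing load balancer ID and
+// networkClient an assumed, authenticated *gophercloud.ServiceClient):
+//
+//	tree, err := GetStatuses(networkClient, lbID).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%+v\n", tree.Loadbalancer)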
+func (r GetStatusesResult) Extract() (*StatusTree, error) {
+	var s struct {
+		Statuses *StatusTree `json:"statuses"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Statuses, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a LoadBalancer.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a LoadBalancer.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a LoadBalancer.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go
new file mode 100644
index 00000000000..73cf5dc126a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go
@@ -0,0 +1,21 @@
+package loadbalancers
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lbaas"
+	resourcePath = "loadbalancers"
+	statusPath   = "statuses"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
+
+func statusRootURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id, statusPath)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go
new file mode 100644
index 00000000000..6ed8c8fb5ff
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go
@@ -0,0 +1,69 @@
+/*
+Package monitors provides information and interaction with Monitors
+of the LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Monitors
+
+	listOpts := monitors.ListOpts{
+		PoolID: "c79a4468-d788-410c-bf79-9a8ef6354852",
+	}
+
+	allPages, err := monitors.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMonitors, err := monitors.ExtractMonitors(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, monitor := range allMonitors {
+		fmt.Printf("%+v\n", monitor)
+	}
+
+Example to Create a Monitor
+
+	createOpts := monitors.CreateOpts{
+		Type:          "HTTP",
+		Name:          "db",
+		PoolID:        "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d",
+		Delay:         20,
+		Timeout:       10,
+		MaxRetries:    5,
+		URLPath:       "/check",
+		ExpectedCodes: "200-299",
+	}
+
+	monitor, err := monitors.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Monitor
+
+	monitorID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	updateOpts := monitors.UpdateOpts{
+		Name:          "NewHealthmonitorName",
+		Delay:         3,
+		Timeout:       20,
+		MaxRetries:    10,
+		URLPath:       "/another_check",
+		ExpectedCodes: "301",
+	}
+
+	monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Monitor
+
+	monitorID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := monitors.Delete(networkClient, monitorID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package monitors
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go
new file mode 100644
index 00000000000..6d9ab8ba79b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go
@@ -0,0 +1,252 @@
+package monitors
+
+import (
+	"fmt"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToMonitorListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Monitor attributes you want to see returned. SortKey allows you to
+// sort by a particular Monitor attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID            string `q:"id"`
+	Name          string `q:"name"`
+	TenantID      string `q:"tenant_id"`
+	PoolID        string `q:"pool_id"`
+	Type          string `q:"type"`
+	Delay         int    `q:"delay"`
+	Timeout       int    `q:"timeout"`
+	MaxRetries    int    `q:"max_retries"`
+	HTTPMethod    string `q:"http_method"`
+	URLPath       string `q:"url_path"`
+	ExpectedCodes string `q:"expected_codes"`
+	AdminStateUp  *bool  `q:"admin_state_up"`
+	Status        string `q:"status"`
+	Limit         int    `q:"limit"`
+	Marker        string `q:"marker"`
+	SortKey       string `q:"sort_key"`
+	SortDir       string `q:"sort_dir"`
+}
+
+// ToMonitorListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToMonitorListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// health monitors. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those health monitors that are owned by
+// the tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToMonitorListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return MonitorPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Constants that represent approved monitoring types.
+const (
+	TypePING  = "PING"
+	TypeTCP   = "TCP"
+	TypeHTTP  = "HTTP"
+	TypeHTTPS = "HTTPS"
+)
+
+var (
+	errDelayMustGETimeout = fmt.Errorf("Delay must be greater than or equal to timeout")
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToMonitorCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+	// The Pool to Monitor.
+	PoolID string `json:"pool_id" required:"true"`
+
+	// The type of probe, which is PING, TCP, HTTP, or HTTPS, that is
+	// sent by the load balancer to verify the member state.
+	Type string `json:"type" required:"true"`
+
+	// The time, in seconds, between sending probes to members.
+	Delay int `json:"delay" required:"true"`
+
+	// Maximum number of seconds for a Monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+	Timeout int `json:"timeout" required:"true"`
+
+	// Number of permissible ping failures before changing the member's
+	// status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int `json:"max_retries" required:"true"`
+
+	// URI path that will be accessed if Monitor type is HTTP or HTTPS.
+	// Required for HTTP(S) types.
+	URLPath string `json:"url_path,omitempty"`
+
+	// The HTTP method used for requests by the Monitor. If this attribute
+	// is not specified, it defaults to "GET". Required for HTTP(S) types.
+	HTTPMethod string `json:"http_method,omitempty"`
+
+	// Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify
+	// a single status like "200", or a range like "200-202". Required for HTTP(S)
+	// types.
+	ExpectedCodes string `json:"expected_codes,omitempty"`
+
+	// The UUID of the tenant who owns the Monitor. Only administrative users
+	// can specify a tenant UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// The Name of the Monitor.
+	Name string `json:"name,omitempty"`
+
+	// The administrative state of the Monitor. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMonitorCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToMonitorCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "healthmonitor")
+	if err != nil {
+		return nil, err
+	}
+
+	switch opts.Type {
+	case TypeHTTP, TypeHTTPS:
+		switch opts.URLPath {
+		case "":
+			return nil, fmt.Errorf("URLPath must be provided for HTTP and HTTPS")
+		}
+		switch opts.ExpectedCodes {
+		case "":
+			return nil, fmt.Errorf("ExpectedCodes must be provided for HTTP and HTTPS")
+		}
+	}
+
+	return b, nil
+}
+
+/*
+ Create is an operation which provisions a new Health Monitor. There are
+ different types of Monitor you can provision: PING, TCP or HTTP(S). Below
Below
+ are examples of how to create each one.
+
+ Here is an example config struct to use when creating a PING or TCP Monitor
+ (PoolID is required, so a hypothetical pool ID is shown):
+
+ CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3,
+	PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"}
+ CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3,
+	PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"}
+
+ Here is an example config struct to use when creating a HTTP(S) Monitor
+ (URLPath and ExpectedCodes are required for HTTP(S) types):
+
+ CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
+	URLPath: "/check", HTTPMethod: "HEAD", ExpectedCodes: "200",
+	PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"}
+*/
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToMonitorCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular Health Monitor based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToMonitorUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts is the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// The time, in seconds, between sending probes to members.
+	Delay int `json:"delay,omitempty"`
+
+	// Maximum number of seconds for a Monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+	Timeout int `json:"timeout,omitempty"`
+
+	// Number of permissible ping failures before changing the member's
+	// status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int `json:"max_retries,omitempty"`
+
+	// URI path that will be accessed if Monitor type is HTTP or HTTPS.
+	// Required for HTTP(S) types.
+	URLPath string `json:"url_path,omitempty"`
+
+	// The HTTP method used for requests by the Monitor. If this attribute
+	// is not specified, it defaults to "GET". Required for HTTP(S) types.
+	HTTPMethod string `json:"http_method,omitempty"`
+
+	// Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify
+	// a single status like "200", or a range like "200-202". Required for HTTP(S)
+	// types.
+	ExpectedCodes string `json:"expected_codes,omitempty"`
+
+	// The Name of the Monitor.
+	Name string `json:"name,omitempty"`
+
+	// The administrative state of the Monitor. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMonitorUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "healthmonitor")
+}
+
+// Update is an operation which modifies the attributes of the specified
+// Monitor.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToMonitorUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete will permanently delete a particular Monitor based on its unique ID.
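+// For example, a minimal sketch (the monitor ID is hypothetical):
+//
+//	err := monitors.Delete(networkClient, "d67d56a6-4a86-4688-a282-f46444705c64").ExtractErr()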
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go new file mode 100644 index 00000000000..ea832cc5d07 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go @@ -0,0 +1,149 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type PoolID struct { + ID string `json:"id"` +} + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // The unique ID for the Monitor. + ID string `json:"id"` + + // The Name of the Monitor. + Name string `json:"name"` + + // TenantID is the owner of the Monitor. + TenantID string `json:"tenant_id"` + + // The type of probe sent by the load balancer to verify the member state, + // which is PING, TCP, HTTP, or HTTPS. + Type string `json:"type"` + + // The time, in seconds, between sending probes to members. + Delay int `json:"delay"` + + // The maximum number of seconds for a monitor to wait for a connection to be + // established before it times out. This value must be less than the delay + // value. + Timeout int `json:"timeout"` + + // Number of allowed connection failures before changing the status of the + // member to INACTIVE. A valid value is from 1 to 10. + MaxRetries int `json:"max_retries"` + + // The HTTP method that the monitor uses for requests. + HTTPMethod string `json:"http_method"` + + // The HTTP path of the request sent by the monitor to test the health of a + // member. Must be a string beginning with a forward slash (/). + URLPath string `json:"url_path" ` + + // Expected HTTP codes for a passing HTTP(S) monitor. + ExpectedCodes string `json:"expected_codes"` + + // The administrative state of the health monitor, which is up (true) or + // down (false). + AdminStateUp bool `json:"admin_state_up"` + + // The status of the health monitor. Indicates whether the health monitor is + // operational. + Status string `json:"status"` + + // List of pools that are associated with the health monitor. + Pools []PoolID `json:"pools"` +} + +// MonitorPage is the page returned by a pager when traversing over a +// collection of health monitors. +type MonitorPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of monitors has reached +// the end of a page and the pager seeks to traverse over a new one. 
In order +// to do this, it needs to construct the next page's URL. +func (r MonitorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"healthmonitors_links"` + } + + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MonitorPage struct is empty. +func (r MonitorPage) IsEmpty() (bool, error) { + is, err := ExtractMonitors(r) + return len(is) == 0, err +} + +// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, +// and extracts the elements into a slice of Monitor structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMonitors(r pagination.Page) ([]Monitor, error) { + var s struct { + Monitors []Monitor `json:"healthmonitors"` + } + err := (r.(MonitorPage)).ExtractInto(&s) + return s.Monitors, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a monitor. +func (r commonResult) Extract() (*Monitor, error) { + var s struct { + Monitor *Monitor `json:"healthmonitor"` + } + err := r.ExtractInto(&s) + return s.Monitor, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Monitor. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Monitor. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Monitor. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the result succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go new file mode 100644 index 00000000000..a222e52a93d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go @@ -0,0 +1,16 @@ +package monitors + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "healthmonitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go new file mode 100644 index 00000000000..2d57ed43938 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go @@ -0,0 +1,124 @@ +/* +Package pools provides information and interaction with Pools and +Members of the LBaaS v2 extension for the OpenStack Networking service. 
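+
+Example to Get a Pool (a minimal sketch; the pool ID is hypothetical)
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	pool, err := pools.Get(networkClient, poolID).Extract()
+	if err != nil {
+		panic(err)
+	}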
+
+Example to List Pools
+
+	listOpts := pools.ListOpts{
+		LoadbalancerID: "c79a4468-d788-410c-bf79-9a8ef6354852",
+	}
+
+	allPages, err := pools.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPools, err := pools.ExtractPools(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pool := range allPools {
+		fmt.Printf("%+v\n", pool)
+	}
+
+Example to Create a Pool
+
+	createOpts := pools.CreateOpts{
+		LBMethod:       pools.LBMethodRoundRobin,
+		Protocol:       "HTTP",
+		Name:           "Example pool",
+		LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab",
+	}
+
+	pool, err := pools.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Pool
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	updateOpts := pools.UpdateOpts{
+		Name: "new-name",
+	}
+
+	pool, err := pools.Update(networkClient, poolID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Pool
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := pools.Delete(networkClient, poolID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Pool Members
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	listOpts := pools.ListMembersOpts{
+		ProtocolPort: 80,
+	}
+
+	allPages, err := pools.ListMembers(networkClient, poolID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMembers, err := pools.ExtractMembers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, member := range allMembers {
+		fmt.Printf("%+v\n", member)
+	}
+
+Example to Create a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	createOpts := pools.CreateMemberOpts{
+		Name:         "db",
+		SubnetID:     "1981f108-3c48-48d2-b908-30f7d28532c9",
+		Address:      "10.0.2.11",
+		ProtocolPort: 80,
+		Weight:       10,
+	}
+
+	member, err := pools.CreateMember(networkClient, poolID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	memberID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+
+	updateOpts := pools.UpdateMemberOpts{
+		Name:   "new-name",
+		Weight: 4,
+	}
+
+	member, err := pools.UpdateMember(networkClient, poolID, memberID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	memberID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+
+	err := pools.DeleteMember(networkClient, poolID, memberID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package pools
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go
new file mode 100644
index 00000000000..2173ee81711
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go
@@ -0,0 +1,347 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToPoolListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Pool attributes you want to see returned. SortKey allows you to
+// sort by a particular Pool attribute.
SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + LBMethod string `q:"lb_algorithm"` + Protocol string `q:"protocol"` + TenantID string `q:"tenant_id"` + AdminStateUp *bool `q:"admin_state_up"` + Name string `q:"name"` + ID string `q:"id"` + LoadbalancerID string `q:"loadbalancer_id"` + ListenerID string `q:"listener_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPoolListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPoolListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// pools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those pools that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPoolListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type LBMethod string +type Protocol string + +// Supported attributes for create/update operations. +const ( + LBMethodRoundRobin LBMethod = "ROUND_ROBIN" + LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" + LBMethodSourceIp LBMethod = "SOURCE_IP" + + ProtocolTCP Protocol = "TCP" + ProtocolHTTP Protocol = "HTTP" + ProtocolHTTPS Protocol = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // The algorithm used to distribute load between the members of the pool. The + // current specification supports LBMethodRoundRobin, LBMethodLeastConnections + // and LBMethodSourceIp as valid values for this attribute. + LBMethod LBMethod `json:"lb_algorithm" required:"true"` + + // The protocol used by the pool members, you can use either + // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. + Protocol Protocol `json:"protocol" required:"true"` + + // The Loadbalancer on which the members of the pool will be associated with. + // Note: one of LoadbalancerID or ListenerID must be provided. + LoadbalancerID string `json:"loadbalancer_id,omitempty" xor:"ListenerID"` + + // The Listener on which the members of the pool will be associated with. + // Note: one of LoadbalancerID or ListenerID must be provided. + ListenerID string `json:"listener_id,omitempty" xor:"LoadbalancerID"` + + // The UUID of the tenant who owns the Pool. Only administrative users + // can specify a tenant UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // Name of the pool. + Name string `json:"name,omitempty"` + + // Human-readable description for the pool. + Description string `json:"description,omitempty"` + + // Persistence is the session persistence of the pool. + // Omit this field to prevent session persistence. 
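+	// For example (an illustrative value):
+	//
+	//	Persistence: &pools.SessionPersistence{Type: "HTTP_COOKIE"}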
+ Persistence *SessionPersistence `json:"session_persistence,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToPoolCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToPoolCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// load balancer pool. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPoolCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPoolUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Name of the pool. + Name string `json:"name,omitempty"` + + // Human-readable description for the pool. + Description string `json:"description,omitempty"` + + // The algorithm used to distribute load between the members of the pool. The + // current specification supports LBMethodRoundRobin, LBMethodLeastConnections + // and LBMethodSourceIp as valid values for this attribute. + LBMethod LBMethod `json:"lb_algorithm,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToPoolUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToPoolUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Update allows pools to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPoolUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular pool based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// ListMemberOptsBuilder allows extensions to add additional parameters to the +// ListMembers request. +type ListMembersOptsBuilder interface { + ToMembersListQuery() (string, error) +} + +// ListMembersOpts allows the filtering and sorting of paginated collections +// through the API. Filtering is achieved by passing in struct field values +// that map to the Member attributes you want to see returned. SortKey allows +// you to sort by a particular Member attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. 
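+// For example, a minimal sketch (poolID is a hypothetical pool ID):
+//
+//	pager := pools.ListMembers(networkClient, poolID, pools.ListMembersOpts{Weight: 10})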
+type ListMembersOpts struct {
+	Name         string `q:"name"`
+	Weight       int    `q:"weight"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	TenantID     string `q:"tenant_id"`
+	Address      string `q:"address"`
+	ProtocolPort int    `q:"protocol_port"`
+	ID           string `q:"id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// ToMembersListQuery formats a ListMembersOpts into a query string.
+func (opts ListMembersOpts) ToMembersListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListMembers returns a Pager which allows you to iterate over a collection of
+// members. It accepts a ListMembersOptsBuilder, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those members that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func ListMembers(c *gophercloud.ServiceClient, poolID string, opts ListMembersOptsBuilder) pagination.Pager {
+	url := memberRootURL(c, poolID)
+	if opts != nil {
+		query, err := opts.ToMembersListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return MemberPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateMemberOptsBuilder allows extensions to add additional parameters to the
+// CreateMember request.
+type CreateMemberOptsBuilder interface {
+	ToMemberCreateMap() (map[string]interface{}, error)
+}
+
+// CreateMemberOpts is the common options struct used in this package's CreateMember
+// operation.
+type CreateMemberOpts struct {
+	// The IP address of the member to receive traffic from the load balancer.
+	Address string `json:"address" required:"true"`
+
+	// The port on which to listen for client traffic.
+	ProtocolPort int `json:"protocol_port" required:"true"`
+
+	// Name of the Member.
+	Name string `json:"name,omitempty"`
+
+	// The UUID of the tenant who owns the Member. Only administrative users
+	// can specify a tenant UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// A positive integer value that indicates the relative portion of traffic
+	// that this member should receive from the pool. For example, a member with
+	// a weight of 10 receives five times as much traffic as a member with a
+	// weight of 2.
+	Weight int `json:"weight,omitempty"`
+
+	// If you omit this parameter, LBaaS uses the vip_subnet_id parameter value
+	// for the subnet UUID.
+	SubnetID string `json:"subnet_id,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMemberCreateMap builds a request body from CreateMemberOpts.
+func (opts CreateMemberOpts) ToMemberCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// CreateMember will create and associate a Member with a particular Pool.
+func CreateMember(c *gophercloud.ServiceClient, poolID string, opts CreateMemberOpts) (r CreateMemberResult) {
+	b, err := opts.ToMemberCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(memberRootURL(c, poolID), b, &r.Body, nil)
+	return
+}
+
+// GetMember retrieves a particular Pool Member based on its unique ID.
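+// For example, a minimal sketch (both IDs are hypothetical):
+//
+//	member, err := pools.GetMember(networkClient, poolID, memberID).Extract()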
+func GetMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r GetMemberResult) {
+	_, r.Err = c.Get(memberResourceURL(c, poolID, memberID), &r.Body, nil)
+	return
+}
+
+// UpdateMemberOptsBuilder allows extensions to add additional parameters to the
+// UpdateMember request.
+type UpdateMemberOptsBuilder interface {
+	ToMemberUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMemberOpts is the common options struct used in this package's
+// UpdateMember operation.
+type UpdateMemberOpts struct {
+	// Name of the Member.
+	Name string `json:"name,omitempty"`
+
+	// A positive integer value that indicates the relative portion of traffic
+	// that this member should receive from the pool. For example, a member with
+	// a weight of 10 receives five times as much traffic as a member with a
+	// weight of 2.
+	Weight int `json:"weight,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMemberUpdateMap builds a request body from UpdateMemberOpts.
+func (opts UpdateMemberOpts) ToMemberUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// UpdateMember allows a Member of a Pool to be updated.
+func UpdateMember(c *gophercloud.ServiceClient, poolID string, memberID string, opts UpdateMemberOptsBuilder) (r UpdateMemberResult) {
+	b, err := opts.ToMemberUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(memberResourceURL(c, poolID, memberID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// DeleteMember will remove and disassociate a Member from a particular
+// Pool.
+func DeleteMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r DeleteMemberResult) {
+	_, r.Err = c.Delete(memberResourceURL(c, poolID, memberID), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go
new file mode 100644
index 00000000000..56790fff990
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go
@@ -0,0 +1,273 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SessionPersistence represents the session persistence feature of the load
+// balancing service. It attempts to force connections or requests in the same
+// session to be processed by the same member as long as it is active. Three
+// types of persistence are supported:
+//
+// SOURCE_IP: With this mode, all connections originating from the same source
+// IP address, will be handled by the same Member of the Pool.
+// HTTP_COOKIE: With this persistence mode, the load balancing function will
+// create a cookie on the first request from a client. Subsequent
+// requests containing the same cookie value will be handled by
+// the same Member of the Pool.
+// APP_COOKIE: With this persistence mode, the load balancing function will
+// rely on a cookie established by the backend application. All
+// requests carrying the same cookie value will be handled by the
+// same Member of the Pool.
+type SessionPersistence struct {
+	// The type of persistence mode.
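+	// One of SOURCE_IP, HTTP_COOKIE, or APP_COOKIE, as described above.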
+ Type string `json:"type"` + + // Name of cookie if persistence mode is set appropriately. + CookieName string `json:"cookie_name,omitempty"` +} + +// LoadBalancerID represents a load balancer. +type LoadBalancerID struct { + ID string `json:"id"` +} + +// ListenerID represents a listener. +type ListenerID struct { + ID string `json:"id"` +} + +// Pool represents a logical set of devices, such as web servers, that you +// group together to receive and process traffic. The load balancing function +// chooses a Member of the Pool according to the configured load balancing +// method to handle the new requests or connections received on the VIP address. +type Pool struct { + // The load-balancer algorithm, which is round-robin, least-connections, and + // so on. This value, which must be supported, is dependent on the provider. + // Round-robin must be supported. + LBMethod string `json:"lb_algorithm"` + + // The protocol of the Pool, which is TCP, HTTP, or HTTPS. + Protocol string `json:"protocol"` + + // Description for the Pool. + Description string `json:"description"` + + // A list of listeners objects IDs. + Listeners []ListenerID `json:"listeners"` //[]map[string]interface{} + + // A list of member objects IDs. + Members []Member `json:"members"` + + // The ID of associated health monitor. + MonitorID string `json:"healthmonitor_id"` + + // The network on which the members of the Pool will be located. Only members + // that are on this network can be added to the Pool. + SubnetID string `json:"subnet_id"` + + // Owner of the Pool. + TenantID string `json:"tenant_id"` + + // The administrative state of the Pool, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Pool name. Does not have to be unique. + Name string `json:"name"` + + // The unique ID for the Pool. + ID string `json:"id"` + + // A list of load balancer objects IDs. + Loadbalancers []LoadBalancerID `json:"loadbalancers"` + + // Indicates whether connections in the same session will be processed by the + // same Pool member or not. + Persistence SessionPersistence `json:"session_persistence"` + + // The load balancer provider. + Provider string `json:"provider"` + + // The Monitor associated with this Pool. + Monitor monitors.Monitor `json:"healthmonitor"` +} + +// PoolPage is the page returned by a pager when traversing over a +// collection of pools. +type PoolPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of pools has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r PoolPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"pools_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PoolPage struct is empty. +func (r PoolPage) IsEmpty() (bool, error) { + is, err := ExtractPools(r) + return len(is) == 0, err +} + +// ExtractPools accepts a Page struct, specifically a PoolPage struct, +// and extracts the elements into a slice of Pool structs. In other words, +// a generic collection is mapped into a relevant slice. 
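+// For example, a minimal sketch (allPages as returned by List(...).AllPages()):
+//
+//	allPools, err := pools.ExtractPools(allPages)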
+func ExtractPools(r pagination.Page) ([]Pool, error) { + var s struct { + Pools []Pool `json:"pools"` + } + err := (r.(PoolPage)).ExtractInto(&s) + return s.Pools, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a pool. +func (r commonResult) Extract() (*Pool, error) { + var s struct { + Pool *Pool `json:"pool"` + } + err := r.ExtractInto(&s) + return s.Pool, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret the result as a Pool. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret the result as a Pool. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret the result as a Pool. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Member represents the application running on a backend server. +type Member struct { + // Name of the Member. + Name string `json:"name"` + + // Weight of Member. + Weight int `json:"weight"` + + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Owner of the Member. + TenantID string `json:"tenant_id"` + + // Parameter value for the subnet UUID. + SubnetID string `json:"subnet_id"` + + // The Pool to which the Member belongs. + PoolID string `json:"pool_id"` + + // The IP address of the Member. + Address string `json:"address"` + + // The port on which the application is hosted. + ProtocolPort int `json:"protocol_port"` + + // The unique ID for the Member. + ID string `json:"id"` +} + +// MemberPage is the page returned by a pager when traversing over a +// collection of Members in a Pool. +type MemberPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of members has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MemberPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"members_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MemberPage struct is empty. +func (r MemberPage) IsEmpty() (bool, error) { + is, err := ExtractMembers(r) + return len(is) == 0, err +} + +// ExtractMembers accepts a Page struct, specifically a MemberPage struct, +// and extracts the elements into a slice of Members structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMembers(r pagination.Page) ([]Member, error) { + var s struct { + Members []Member `json:"members"` + } + err := (r.(MemberPage)).ExtractInto(&s) + return s.Members, err +} + +type commonMemberResult struct { + gophercloud.Result +} + +// ExtractMember is a function that accepts a result and extracts a member. +func (r commonMemberResult) Extract() (*Member, error) { + var s struct { + Member *Member `json:"member"` + } + err := r.ExtractInto(&s) + return s.Member, err +} + +// CreateMemberResult represents the result of a CreateMember operation. 
+// Call its Extract method to interpret it as a Member.
+type CreateMemberResult struct {
+	commonMemberResult
+}
+
+// GetMemberResult represents the result of a GetMember operation.
+// Call its Extract method to interpret it as a Member.
+type GetMemberResult struct {
+	commonMemberResult
+}
+
+// UpdateMemberResult represents the result of an UpdateMember operation.
+// Call its Extract method to interpret it as a Member.
+type UpdateMemberResult struct {
+	commonMemberResult
+}
+
+// DeleteMemberResult represents the result of a DeleteMember operation.
+// Call its ExtractErr method to determine if the request succeeded or failed.
+type DeleteMemberResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go
new file mode 100644
index 00000000000..bceca67707f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go
@@ -0,0 +1,25 @@
+package pools
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lbaas"
+	resourcePath = "pools"
+	memberPath   = "members"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
+
+func memberRootURL(c *gophercloud.ServiceClient, poolID string) string {
+	return c.ServiceURL(rootPath, resourcePath, poolID, memberPath)
+}
+
+func memberResourceURL(c *gophercloud.ServiceClient, poolID string, memberID string) string {
+	return c.ServiceURL(rootPath, resourcePath, poolID, memberPath, memberID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go
new file mode 100644
index 00000000000..ddc44175a79
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go
@@ -0,0 +1,73 @@
+/*
+Package provider gives access to the provider Neutron plugin, allowing
+network extended attributes. The provider extended attributes for networks
+enable administrative users to specify how network objects map to the
+underlying networking infrastructure. These extended attributes also appear
+when administrative users query networks.
+
+For more information about extended attributes, see the NetworkExtAttrs
+struct. The actual semantics of these attributes depend on the technology
+back end of the particular plug-in. See the plug-in documentation and the
+OpenStack Cloud Administrator Guide to understand which values should be
+specific for each of these attributes when OpenStack Networking is deployed
+with a particular plug-in. The examples shown in this chapter refer to the
+Open vSwitch plug-in.
+
+The default policy settings enable only users with administrative rights to
+specify these parameters in requests and to see their values in responses. By
+default, the provider network extension attributes are completely hidden from
+regular tenants. As a rule of thumb, if these attributes are not visible in a
+GET /networks/ operation, this implies the user submitting the
+request is not authorized to view or manipulate provider network attributes.
+
+Example to List Networks with Provider Information
+
+	type NetworkWithProvider struct {
+		networks.Network
+		provider.NetworkProviderExt
+	}
+
+	var allNetworks []NetworkWithProvider
+
+	allPages, err := networks.List(networkClient, nil).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	err = networks.ExtractNetworksInto(allPages, &allNetworks)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, network := range allNetworks {
+		fmt.Printf("%+v\n", network)
+	}
+
+Example to Create a Provider Network
+
+	segments := []provider.Segment{
+		provider.Segment{
+			NetworkType:     "vxlan",
+			PhysicalNetwork: "br-ex",
+			SegmentationID:  615,
+		},
+	}
+
+	iTrue := true
+	networkCreateOpts := networks.CreateOpts{
+		Name:         "provider-network",
+		AdminStateUp: &iTrue,
+		Shared:       &iTrue,
+	}
+
+	createOpts := provider.CreateOptsExt{
+		CreateOptsBuilder: networkCreateOpts,
+		Segments:          segments,
+	}
+
+	network, err := networks.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package provider
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go
new file mode 100644
index 00000000000..32c27970a4c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go
@@ -0,0 +1,28 @@
+package provider
+
+import (
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+)
+
+// CreateOptsExt adds a Segments option to the base Network CreateOpts.
+type CreateOptsExt struct {
+	networks.CreateOptsBuilder
+	Segments []Segment `json:"segments,omitempty"`
+}
+
+// ToNetworkCreateMap adds segments to the base network creation options.
+func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToNetworkCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Segments == nil {
+		return base, nil
+	}
+
+	providerMap := base["network"].(map[string]interface{})
+	providerMap["segments"] = opts.Segments
+
+	return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go
new file mode 100644
index 00000000000..9babd2ab6ed
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go
@@ -0,0 +1,62 @@
+package provider
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// NetworkProviderExt represents an extended form of a Network with additional
+// fields.
+type NetworkProviderExt struct {
+	// Specifies the nature of the physical network mapped to this network
+	// resource. Examples are flat, vlan, or gre.
+	NetworkType string `json:"provider:network_type"`
+
+	// Identifies the physical network on top of which this network object is
+	// being implemented. The OpenStack Networking API does not expose any
+	// facility for retrieving the list of available physical networks. As an
+	// example, in the Open vSwitch plug-in this is a symbolic name which is
+	// then mapped to specific bridges on each compute host through the Open
+	// vSwitch plug-in configuration file.
+	PhysicalNetwork string `json:"provider:physical_network"`
+
+	// Identifies an isolated segment on the physical network; the nature of the
+	// segment depends on the segmentation model defined by network_type.
For + // instance, if network_type is vlan, then this is a vlan identifier; + // otherwise, if network_type is gre, then this will be a gre key. + SegmentationID string `json:"-"` + + // Segments is an array of Segment which defines multiple physical bindings + // to logical networks. + Segments []Segment `json:"segments"` +} + +// Segment defines a physical binding to a logical network. +type Segment struct { + PhysicalNetwork string `json:"provider:physical_network"` + NetworkType string `json:"provider:network_type"` + SegmentationID int `json:"provider:segmentation_id"` +} + +func (r *NetworkProviderExt) UnmarshalJSON(b []byte) error { + type tmp NetworkProviderExt + var networkProviderExt struct { + tmp + SegmentationID interface{} `json:"provider:segmentation_id"` + } + + if err := json.Unmarshal(b, &networkProviderExt); err != nil { + return err + } + + *r = NetworkProviderExt(networkProviderExt.tmp) + + switch t := networkProviderExt.SegmentationID.(type) { + case float64: + r.SegmentationID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.SegmentationID = string(t) + } + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go new file mode 100644 index 00000000000..7d8bbcaacba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go @@ -0,0 +1,58 @@ +/* +Package groups provides information and interaction with Security Groups +for the OpenStack Networking service. + +Example to List Security Groups + + listOpts := groups.ListOpts{ + TenantID: "966b3c7d36a24facaf20b7e458bf2192", + } + + allPages, err := groups.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := groups.ExtractGroups(allPages) + if err != nil { + panic(err) + } + + for _, group := range allGroups { + fmt.Printf("%+v\n", group) + } + +Example to Create a Security Group + + createOpts := groups.CreateOpts{ + Name: "group_name", + Description: "A Security Group", + } + + group, err := groups.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Security Group + + groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + + updateOpts := groups.UpdateOpts{ + Name: "new_name", + } + + group, err := groups.Update(networkClient, groupID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Security Group + + groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := groups.Delete(networkClient, groupID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package groups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go new file mode 100644 index 00000000000..0a7ef79cf66 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go @@ -0,0 +1,151 @@ +package groups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the group attributes you want to see returned. 
SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + TenantID string `q:"tenant_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// security groups. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return SecGroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new security group. +type CreateOpts struct { + // Human-readable name for the Security Group. Does not have to be unique. + Name string `json:"name" required:"true"` + + // The UUID of the tenant who owns the Group. Only administrative users + // can specify a tenant UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // Describes the security group. + Description string `json:"description,omitempty"` +} + +// ToSecGroupCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Create is an operation which provisions a new security group with default +// security group rules for the IPv4 and IPv6 ether types. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains all the values needed to update an existing security +// group. +type UpdateOpts struct { + // Human-readable name for the Security Group. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Describes the security group. + Description string `json:"description,omitempty"` +} + +// ToSecGroupUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Update is an operation which updates an existing security group. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecGroupUpdateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get retrieves a particular security group based on its unique ID. 
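+// For example, a minimal sketch (the group ID is hypothetical):
+//
+//	group, err := groups.Get(networkClient, "37d94f8a-d136-465c-ae46-144f0d8ef141").Extract()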
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular security group based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a security group's ID, +// given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + pages, err := List(client, ListOpts{}).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractGroups(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "security group"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "security group"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go new file mode 100644 index 00000000000..8a8e0ffcfdd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go @@ -0,0 +1,102 @@ +package groups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecGroup represents a container for security group rules. +type SecGroup struct { + // The UUID for the security group. + ID string + + // Human-readable name for the security group. Might not be unique. + // Cannot be named "default" as that is automatically created for a tenant. + Name string + + // The security group description. + Description string + + // A slice of security group rules that dictate the permitted behaviour for + // traffic entering and leaving the group. + Rules []rules.SecGroupRule `json:"security_group_rules"` + + // Owner of the security group. + TenantID string `json:"tenant_id"` +} + +// SecGroupPage is the page returned by a pager when traversing over a +// collection of security groups. +type SecGroupPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of security groups has +// reached the end of a page and the pager seeks to traverse over a new one. In +// order to do this, it needs to construct the next page's URL. +func (r SecGroupPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"security_groups_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SecGroupPage struct is empty. +func (r SecGroupPage) IsEmpty() (bool, error) { + is, err := ExtractGroups(r) + return len(is) == 0, err +} + +// ExtractGroups accepts a Page struct, specifically a SecGroupPage struct, +// and extracts the elements into a slice of SecGroup structs. In other words, +// a generic collection is mapped into a relevant slice. 
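+// For example, a minimal sketch (allPages as returned by List(...).AllPages()):
+//
+//	allGroups, err := groups.ExtractGroups(allPages)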
+func ExtractGroups(r pagination.Page) ([]SecGroup, error) { + var s struct { + SecGroups []SecGroup `json:"security_groups"` + } + err := (r.(SecGroupPage)).ExtractInto(&s) + return s.SecGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a security group. +func (r commonResult) Extract() (*SecGroup, error) { + var s struct { + SecGroup *SecGroup `json:"security_group"` + } + err := r.ExtractInto(&s) + return s.SecGroup, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a SecGroup. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a SecGroup. +type UpdateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a SecGroup. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go new file mode 100644 index 00000000000..104cbcc558d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go @@ -0,0 +1,13 @@ +package groups + +import "github.com/gophercloud/gophercloud" + +const rootPath = "security-groups" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go new file mode 100644 index 00000000000..bf66dc8b40e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go @@ -0,0 +1,50 @@ +/* +Package rules provides information and interaction with Security Group Rules +for the OpenStack Networking service. 
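+
+Example to Get a Security Group Rule (a minimal sketch; the rule ID is hypothetical)
+
+	ruleID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+	rule, err := rules.Get(networkClient, ruleID).Extract()
+	if err != nil {
+		panic(err)
+	}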
+ +Example to List Security Groups Rules + + listOpts := rules.ListOpts{ + Protocol: "tcp", + } + + allPages, err := rules.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRules, err := rules.ExtractRules(allPages) + if err != nil { + panic(err) + } + + for _, rule := range allRules { + fmt.Printf("%+v\n", rule) + } + +Example to Create a Security Group Rule + + createOpts := rules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: rules.EtherType4, + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + + rule, err := rules.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Security Group Rule + + ruleID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := rules.Delete(networkClient, ruleID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package rules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go new file mode 100644 index 00000000000..197710fc4c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go @@ -0,0 +1,154 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the security group rule attributes you want to see returned. SortKey allows +// you to sort by a particular network attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Direction string `q:"direction"` + EtherType string `q:"ethertype"` + ID string `q:"id"` + PortRangeMax int `q:"port_range_max"` + PortRangeMin int `q:"port_range_min"` + Protocol string `q:"protocol"` + RemoteGroupID string `q:"remote_group_id"` + RemoteIPPrefix string `q:"remote_ip_prefix"` + SecGroupID string `q:"security_group_id"` + TenantID string `q:"tenant_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// security group rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. 
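+// For example, a minimal sketch (the filter value is illustrative):
+//
+//	pager := rules.List(networkClient, rules.ListOpts{Protocol: "tcp"})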
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type RuleDirection string +type RuleProtocol string +type RuleEtherType string + +// Constants useful for CreateOpts +const ( + DirIngress RuleDirection = "ingress" + DirEgress RuleDirection = "egress" + EtherType4 RuleEtherType = "IPv4" + EtherType6 RuleEtherType = "IPv6" + ProtocolAH RuleProtocol = "ah" + ProtocolDCCP RuleProtocol = "dccp" + ProtocolEGP RuleProtocol = "egp" + ProtocolESP RuleProtocol = "esp" + ProtocolGRE RuleProtocol = "gre" + ProtocolICMP RuleProtocol = "icmp" + ProtocolIGMP RuleProtocol = "igmp" + ProtocolIPv6Encap RuleProtocol = "ipv6-encap" + ProtocolIPv6Frag RuleProtocol = "ipv6-frag" + ProtocolIPv6ICMP RuleProtocol = "ipv6-icmp" + ProtocolIPv6NoNxt RuleProtocol = "ipv6-nonxt" + ProtocolIPv6Opts RuleProtocol = "ipv6-opts" + ProtocolIPv6Route RuleProtocol = "ipv6-route" + ProtocolOSPF RuleProtocol = "ospf" + ProtocolPGM RuleProtocol = "pgm" + ProtocolRSVP RuleProtocol = "rsvp" + ProtocolSCTP RuleProtocol = "sctp" + ProtocolTCP RuleProtocol = "tcp" + ProtocolUDP RuleProtocol = "udp" + ProtocolUDPLite RuleProtocol = "udplite" + ProtocolVRRP RuleProtocol = "vrrp" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new security group +// rule. +type CreateOpts struct { + // Must be either "ingress" or "egress": the direction in which the security + // group rule is applied. + Direction RuleDirection `json:"direction" required:"true"` + + // Must be "IPv4" or "IPv6", and addresses represented in CIDR must match the + // ingress or egress rules. + EtherType RuleEtherType `json:"ethertype" required:"true"` + + // The security group ID to associate with this security group rule. + SecGroupID string `json:"security_group_id" required:"true"` + + // The maximum port number in the range that is matched by the security group + // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If + // the protocol is ICMP, this value must be an ICMP type. + PortRangeMax int `json:"port_range_max,omitempty"` + + // The minimum port number in the range that is matched by the security group + // rule. If the protocol is TCP or UDP, this value must be less than or equal + // to the value of the PortRangeMax attribute. If the protocol is ICMP, this + // value must be an ICMP type. + PortRangeMin int `json:"port_range_min,omitempty"` + + // The protocol that is matched by the security group rule. Valid values are + // "tcp", "udp", "icmp" or an empty string. + Protocol RuleProtocol `json:"protocol,omitempty"` + + // The remote group ID to be associated with this security group rule. You can + // specify either RemoteGroupID or RemoteIPPrefix. + RemoteGroupID string `json:"remote_group_id,omitempty"` + + // The remote IP prefix to be associated with this security group rule. You can + // specify either RemoteGroupID or RemoteIPPrefix. This attribute matches the + // specified IP prefix as the source IP address of the IP packet. 
+	RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"`
+
+	// The UUID of the tenant who owns the Rule. Only administrative users
+	// can specify a tenant UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+}
+
+// ToSecGroupRuleCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToSecGroupRuleCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "security_group_rule")
+}
+
+// Create is an operation which adds a new security group rule and associates it
+// with an existing security group (whose ID is specified in CreateOpts).
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSecGroupRuleCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular security group rule based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// Delete will permanently delete a particular security group rule based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
new file mode 100644
index 00000000000..0d8c43f8eda
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
@@ -0,0 +1,121 @@
+package rules
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SecGroupRule represents a rule to dictate the behaviour of incoming or
+// outgoing traffic for a particular security group.
+type SecGroupRule struct {
+	// The UUID for this security group rule.
+	ID string
+
+	// The direction in which the security group rule is applied. The only values
+	// allowed are "ingress" or "egress". For a compute instance, an ingress
+	// security group rule is applied to incoming (ingress) traffic for that
+	// instance. An egress rule is applied to traffic leaving the instance.
+	Direction string
+
+	// Must be IPv4 or IPv6, and addresses represented in CIDR must match the
+	// ingress or egress rules.
+	EtherType string `json:"ethertype"`
+
+	// The security group ID to associate with this security group rule.
+	SecGroupID string `json:"security_group_id"`
+
+	// The minimum port number in the range that is matched by the security group
+	// rule. If the protocol is TCP or UDP, this value must be less than or equal
+	// to the value of the PortRangeMax attribute. If the protocol is ICMP, this
+	// value must be an ICMP type.
+	PortRangeMin int `json:"port_range_min"`
+
+	// The maximum port number in the range that is matched by the security group
+	// rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If
+	// the protocol is ICMP, this value must be an ICMP code.
+	PortRangeMax int `json:"port_range_max"`
+
+	// The protocol that is matched by the security group rule. Valid values are
+	// "tcp", "udp", "icmp" or an empty string.
+	Protocol string
+
+	// The remote group ID to be associated with this security group rule. You
+	// can specify either RemoteGroupID or RemoteIPPrefix.
+	RemoteGroupID string `json:"remote_group_id"`
+
+	// The remote IP prefix to be associated with this security group rule.
+	// You can specify either RemoteGroupID or RemoteIPPrefix. This attribute
+	// matches the specified IP prefix as the source IP address of the IP packet.
+	RemoteIPPrefix string `json:"remote_ip_prefix"`
+
+	// The owner of this security group rule.
+	TenantID string `json:"tenant_id"`
+}
+
+// SecGroupRulePage is the page returned by a pager when traversing over a
+// collection of security group rules.
+type SecGroupRulePage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of security group rules has
+// reached the end of a page and the pager seeks to traverse over a new one. In
+// order to do this, it needs to construct the next page's URL.
+func (r SecGroupRulePage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"security_group_rules_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a SecGroupRulePage struct is empty.
+func (r SecGroupRulePage) IsEmpty() (bool, error) {
+	is, err := ExtractRules(r)
+	return len(is) == 0, err
+}
+
+// ExtractRules accepts a Page struct, specifically a SecGroupRulePage struct,
+// and extracts the elements into a slice of SecGroupRule structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractRules(r pagination.Page) ([]SecGroupRule, error) {
+	var s struct {
+		SecGroupRules []SecGroupRule `json:"security_group_rules"`
+	}
+	err := (r.(SecGroupRulePage)).ExtractInto(&s)
+	return s.SecGroupRules, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a security group rule.
+func (r commonResult) Extract() (*SecGroupRule, error) {
+	var s struct {
+		SecGroupRule *SecGroupRule `json:"security_group_rule"`
+	}
+	err := r.ExtractInto(&s)
+	return s.SecGroupRule, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a SecGroupRule.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a SecGroupRule.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
new file mode 100644
index 00000000000..a5ede0e89b9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
@@ -0,0 +1,13 @@
+package rules
+
+import "github.com/gophercloud/gophercloud"
+
+const rootPath = "security-group-rules"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go
new file mode 100644
index 00000000000..e768b71f820
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go
@@ -0,0 +1,65 @@
+/*
+Package networks contains functionality for working with Neutron network
+resources. A network is an isolated virtual layer-2 broadcast domain that is
+typically reserved for the tenant who created it (unless you configure the
+network to be shared). Tenants can create multiple networks until their
+per-tenant quota is reached.
+
+In the v2.0 Networking API, the network is the main entity. Ports and subnets
+are always associated with a network.
+
+Example to List Networks
+
+	listOpts := networks.ListOpts{
+		TenantID: "a99e9b4e620e4db09a2dfb6e42a01e66",
+	}
+
+	allPages, err := networks.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allNetworks, err := networks.ExtractNetworks(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, network := range allNetworks {
+		fmt.Printf("%+v\n", network)
+	}
+
+Example to Create a Network
+
+	iTrue := true
+	createOpts := networks.CreateOpts{
+		Name:         "network_1",
+		AdminStateUp: &iTrue,
+	}
+
+	network, err := networks.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Network
+
+	networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78"
+
+	updateOpts := networks.UpdateOpts{
+		Name: "new_name",
+	}
+
+	network, err := networks.Update(networkClient, networkID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Network
+
+	networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78"
+	err := networks.Delete(networkClient, networkID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package networks
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go
new file mode 100644
index 00000000000..5b61b247192
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go
@@ -0,0 +1,165 @@
+package networks
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToNetworkListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API.
Filtering is achieved by passing in struct field values that map to +// the network attributes you want to see returned. SortKey allows you to sort +// by a particular network attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + Shared *bool `q:"shared"` + ID string `q:"id"` + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// networks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToNetworkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific network based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToNetworkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a network. +type CreateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` + TenantID string `json:"tenant_id,omitempty"` +} + +// ToNetworkCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "network") +} + +// Create accepts a CreateOpts struct and creates a new network using the values +// provided. This operation does not actually require a request body, i.e. the +// CreateOpts struct argument can be empty. +// +// The tenant ID that is contained in the URI is the tenant that creates the +// network. An admin user, however, has the option of specifying another tenant +// ID in the CreateOpts struct. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToNetworkCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToNetworkUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options used to update a network. +type UpdateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` +} + +// ToNetworkUpdateMap builds a request body from UpdateOpts. 
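+//
+// A sketch of the resulting body shape, assuming only Name is set: the opts
+// are wrapped under a "network" key by gophercloud.BuildRequestBody, e.g.
+//
+//	{"network": {"name": "new_name"}}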
+func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "network") +} + +// Update accepts a UpdateOpts struct and updates an existing network using the +// values provided. For more information, see the Create function. +func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToNetworkUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, networkID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the network associated with it. +func Delete(c *gophercloud.ServiceClient, networkID string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, networkID), nil) + return +} + +// IDFromName is a convenience function that returns a network's ID, given +// its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + pages, err := List(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractNetworks(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "network"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "network"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go new file mode 100644 index 00000000000..ffd0259f1d2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go @@ -0,0 +1,111 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a network resource. +func (r commonResult) Extract() (*Network, error) { + var s Network + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "network") +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Network. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Network. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Network. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Network represents, well, a network. +type Network struct { + // UUID for the network + ID string `json:"id"` + + // Human-readable name for the network. Might not be unique. + Name string `json:"name"` + + // The administrative state of network. If false (down), the network does not + // forward packets. + AdminStateUp bool `json:"admin_state_up"` + + // Indicates whether network is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', or `ERROR'. 
Plug-ins might define additional + // values. + Status string `json:"status"` + + // Subnets associated with this network. + Subnets []string `json:"subnets"` + + // Owner of network. + TenantID string `json:"tenant_id"` + + // Specifies whether the network resource can be accessed by any tenant. + Shared bool `json:"shared"` +} + +// NetworkPage is the page returned by a pager when traversing over a +// collection of networks. +type NetworkPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of networks has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r NetworkPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"networks_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a NetworkPage struct is empty. +func (r NetworkPage) IsEmpty() (bool, error) { + is, err := ExtractNetworks(r) + return len(is) == 0, err +} + +// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct, +// and extracts the elements into a slice of Network structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s []Network + err := ExtractNetworksInto(r, &s) + return s, err +} + +func ExtractNetworksInto(r pagination.Page, v interface{}) error { + return r.(NetworkPage).Result.ExtractIntoSlicePtr(v, "networks") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go new file mode 100644 index 00000000000..4a8fb1dc7d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go @@ -0,0 +1,31 @@ +package networks + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("networks", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("networks") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go new file mode 100644 index 00000000000..cfb1774fb4b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go @@ -0,0 +1,73 @@ +/* +Package ports contains functionality for working with Neutron port resources. + +A port represents a virtual switch port on a logical network switch. Virtual +instances attach their interfaces into ports. The logical port also defines +the MAC address and the IP address(es) to be assigned to the interfaces +plugged into them. When IP addresses are associated to a port, this also +implies the port is associated with a subnet, as the IP address was taken +from the allocation pool for a specific subnet. 
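+
+Example to Get a Port
+
+A minimal sketch of retrieving a single port by ID with this package's Get
+function (the UUID below is illustrative):
+
+	portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2"
+	port, err := ports.Get(networkClient, portID).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", port)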
+
+Example to List Ports
+
+	listOpts := ports.ListOpts{
+		DeviceID: "b0b89efe-82f8-461d-958b-adbf80f50c7d",
+	}
+
+	allPages, err := ports.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPorts, err := ports.ExtractPorts(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, port := range allPorts {
+		fmt.Printf("%+v\n", port)
+	}
+
+Example to Create a Port
+
+	asu := true
+	createOpts := ports.CreateOpts{
+		Name:         "private-port",
+		AdminStateUp: &asu,
+		NetworkID:    "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+		FixedIPs: []ports.IP{
+			{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"},
+		},
+		SecurityGroups: &[]string{"foo"},
+		AllowedAddressPairs: []ports.AddressPair{
+			{IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"},
+		},
+	}
+
+	port, err := ports.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Port
+
+	portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8"
+
+	updateOpts := ports.UpdateOpts{
+		Name:           "new_name",
+		SecurityGroups: &[]string{},
+	}
+
+	port, err := ports.Update(networkClient, portID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Port
+
+	portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8"
+	err := ports.Delete(networkClient, portID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package ports
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go
new file mode 100644
index 00000000000..fd1e9725764
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go
@@ -0,0 +1,177 @@
+package ports
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToPortListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the port attributes you want to see returned. SortKey allows you to sort
+// by a particular port attribute. SortDir sets the direction, and is either
+// `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	Name         string `q:"name"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	NetworkID    string `q:"network_id"`
+	TenantID     string `q:"tenant_id"`
+	DeviceOwner  string `q:"device_owner"`
+	MACAddress   string `q:"mac_address"`
+	ID           string `q:"id"`
+	DeviceID     string `q:"device_id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// ToPortListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToPortListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// ports. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those ports that are owned by the tenant
+// who submits the request, unless the request is submitted by a user with
+// administrative rights.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToPortListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return PortPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a specific port based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToPortCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents the attributes used when creating a new port.
+type CreateOpts struct {
+	NetworkID           string        `json:"network_id" required:"true"`
+	Name                string        `json:"name,omitempty"`
+	AdminStateUp        *bool         `json:"admin_state_up,omitempty"`
+	MACAddress          string        `json:"mac_address,omitempty"`
+	FixedIPs            interface{}   `json:"fixed_ips,omitempty"`
+	DeviceID            string        `json:"device_id,omitempty"`
+	DeviceOwner         string        `json:"device_owner,omitempty"`
+	TenantID            string        `json:"tenant_id,omitempty"`
+	SecurityGroups      *[]string     `json:"security_groups,omitempty"`
+	AllowedAddressPairs []AddressPair `json:"allowed_address_pairs,omitempty"`
+}
+
+// ToPortCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToPortCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "port")
+}
+
+// Create accepts a CreateOpts struct and creates a new port using the values
+// provided. You must remember to provide a NetworkID value.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToPortCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(createURL(c), b, &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToPortUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents the attributes used when updating an existing port.
+type UpdateOpts struct {
+	Name                string         `json:"name,omitempty"`
+	AdminStateUp        *bool          `json:"admin_state_up,omitempty"`
+	FixedIPs            interface{}    `json:"fixed_ips,omitempty"`
+	DeviceID            string         `json:"device_id,omitempty"`
+	DeviceOwner         string         `json:"device_owner,omitempty"`
+	SecurityGroups      *[]string      `json:"security_groups,omitempty"`
+	AllowedAddressPairs *[]AddressPair `json:"allowed_address_pairs,omitempty"`
+}
+
+// ToPortUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToPortUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "port")
+}
+
+// Update accepts a UpdateOpts struct and updates an existing port using the
+// values provided.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToPortUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Delete accepts a unique ID and deletes the port associated with it.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(deleteURL(c, id), nil)
+	return
+}
+
+// IDFromName is a convenience function that returns a port's ID,
+// given its name.
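+//
+// A minimal usage sketch, assuming a uniquely named port "private-port"
+// exists:
+//
+//	portID, err := ports.IDFromName(networkClient, "private-port")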
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+	pages, err := List(client, nil).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractPorts(pages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, s := range all {
+		if s.Name == name {
+			count++
+			id = s.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "port"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "port"}
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go
new file mode 100644
index 00000000000..c50da6dfffd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go
@@ -0,0 +1,136 @@
+package ports
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a port resource.
+func (r commonResult) Extract() (*Port, error) {
+	var s struct {
+		Port *Port `json:"port"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Port, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Port.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Port.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Port.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// IP is a sub-struct that represents an individual IP.
+type IP struct {
+	SubnetID  string `json:"subnet_id"`
+	IPAddress string `json:"ip_address,omitempty"`
+}
+
+// AddressPair contains the IP Address and the MAC address.
+type AddressPair struct {
+	IPAddress  string `json:"ip_address,omitempty"`
+	MACAddress string `json:"mac_address,omitempty"`
+}
+
+// Port represents a Neutron port. See package documentation for a top-level
+// description of what this is.
+type Port struct {
+	// UUID for the port.
+	ID string `json:"id"`
+
+	// Network that this port is associated with.
+	NetworkID string `json:"network_id"`
+
+	// Human-readable name for the port. Might not be unique.
+	Name string `json:"name"`
+
+	// Administrative state of port. If false (down), port does not forward
+	// packets.
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Indicates whether network is currently operational. Possible values include
+	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional
+	// values.
+	Status string `json:"status"`
+
+	// MAC address to use on this port.
+	MACAddress string `json:"mac_address"`
+
+	// Specifies IP addresses for the port, thus associating the port itself with
+	// the subnets from which those IP addresses were picked.
+	FixedIPs []IP `json:"fixed_ips"`
+
+	// Owner of network.
+	TenantID string `json:"tenant_id"`
+
+	// Identifies the entity (e.g. a DHCP agent) using this port.
+ DeviceOwner string `json:"device_owner"` + + // Specifies the IDs of any security groups associated with a port. + SecurityGroups []string `json:"security_groups"` + + // Identifies the device (e.g., virtual server) using this port. + DeviceID string `json:"device_id"` + + // Identifies the list of IP addresses the port will recognize/accept + AllowedAddressPairs []AddressPair `json:"allowed_address_pairs"` +} + +// PortPage is the page returned by a pager when traversing over a collection +// of network ports. +type PortPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of ports has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r PortPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"ports_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PortPage struct is empty. +func (r PortPage) IsEmpty() (bool, error) { + is, err := ExtractPorts(r) + return len(is) == 0, err +} + +// ExtractPorts accepts a Page struct, specifically a PortPage struct, +// and extracts the elements into a slice of Port structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPorts(r pagination.Page) ([]Port, error) { + var s struct { + Ports []Port `json:"ports"` + } + err := (r.(PortPage)).ExtractInto(&s) + return s.Ports, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go new file mode 100644 index 00000000000..600d6f2fd95 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go @@ -0,0 +1,31 @@ +package ports + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("ports", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("ports") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go new file mode 100644 index 00000000000..d0ed8dff06a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go @@ -0,0 +1,133 @@ +/* +Package subnets contains functionality for working with Neutron subnet +resources. A subnet represents an IP address block that can be used to +assign IP addresses to virtual instances. Each subnet must have a CIDR and +must be associated with a network. IPs can either be selected from the whole +subnet CIDR or from allocation pools specified by the user. + +A subnet can also have a gateway, a list of DNS name servers, and host routes. +This information is pushed to instances whose interfaces are associated with +the subnet. 
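+
+Example to Get a Subnet
+
+A minimal sketch of retrieving a single subnet by ID with this package's Get
+function (the UUID below is illustrative):
+
+	subnetID := "08eae331-0402-425a-923c-34f7cfe39c1b"
+	subnet, err := subnets.Get(networkClient, subnetID).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", subnet)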
+
+Example to List Subnets
+
+	listOpts := subnets.ListOpts{
+		IPVersion: 4,
+	}
+
+	allPages, err := subnets.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allSubnets, err := subnets.ExtractSubnets(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, subnet := range allSubnets {
+		fmt.Printf("%+v\n", subnet)
+	}
+
+Example to Create a Subnet With Specified Gateway
+
+	var gatewayIP = "192.168.199.1"
+	createOpts := subnets.CreateOpts{
+		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+		IPVersion: 4,
+		CIDR:      "192.168.199.0/24",
+		GatewayIP: &gatewayIP,
+		AllocationPools: []subnets.AllocationPool{
+			{
+				Start: "192.168.199.2",
+				End:   "192.168.199.254",
+			},
+		},
+		DNSNameservers: []string{"foo"},
+	}
+
+	subnet, err := subnets.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Subnet With No Gateway
+
+	var noGateway = ""
+
+	createOpts := subnets.CreateOpts{
+		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23",
+		IPVersion: 4,
+		CIDR:      "192.168.1.0/24",
+		GatewayIP: &noGateway,
+		AllocationPools: []subnets.AllocationPool{
+			{
+				Start: "192.168.1.2",
+				End:   "192.168.1.254",
+			},
+		},
+		DNSNameservers: []string{},
+	}
+
+	subnet, err := subnets.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Subnet With a Default Gateway
+
+	createOpts := subnets.CreateOpts{
+		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23",
+		IPVersion: 4,
+		CIDR:      "192.168.1.0/24",
+		AllocationPools: []subnets.AllocationPool{
+			{
+				Start: "192.168.1.2",
+				End:   "192.168.1.254",
+			},
+		},
+		DNSNameservers: []string{},
+	}
+
+	subnet, err := subnets.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Subnet
+
+	subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23"
+
+	updateOpts := subnets.UpdateOpts{
+		Name:           "new_name",
+		DNSNameservers: []string{"8.8.8.8"},
+	}
+
+	subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove a Gateway From a Subnet
+
+	var noGateway = ""
+	subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23"
+
+	updateOpts := subnets.UpdateOpts{
+		GatewayIP: &noGateway,
+	}
+
+	subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Subnet
+
+	subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23"
+	err := subnets.Delete(networkClient, subnetID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package subnets
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go
new file mode 100644
index 00000000000..c2e74eff440
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go
@@ -0,0 +1,231 @@
+package subnets
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToSubnetListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the subnet attributes you want to see returned. SortKey allows you to sort
+// by a particular subnet attribute. SortDir sets the direction, and is either
+// `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Name       string `q:"name"`
+	EnableDHCP *bool  `q:"enable_dhcp"`
+	NetworkID  string `q:"network_id"`
+	TenantID   string `q:"tenant_id"`
+	IPVersion  int    `q:"ip_version"`
+	GatewayIP  string `q:"gateway_ip"`
+	CIDR       string `q:"cidr"`
+	ID         string `q:"id"`
+	Limit      int    `q:"limit"`
+	Marker     string `q:"marker"`
+	SortKey    string `q:"sort_key"`
+	SortDir    string `q:"sort_dir"`
+}
+
+// ToSubnetListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToSubnetListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// subnets. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those subnets that are owned by the tenant
+// who submits the request, unless the request is submitted by a user with
+// administrative rights.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToSubnetListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return SubnetPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a specific subnet based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToSubnetCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents the attributes used when creating a new subnet.
+type CreateOpts struct {
+	// NetworkID is the UUID of the network the subnet will be associated with.
+	NetworkID string `json:"network_id" required:"true"`
+
+	// CIDR is the address CIDR of the subnet.
+	CIDR string `json:"cidr" required:"true"`
+
+	// Name is a human-readable name of the subnet.
+	Name string `json:"name,omitempty"`
+
+	// The UUID of the tenant who owns the Subnet. Only administrative users
+	// can specify a tenant UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// AllocationPools are IP Address pools that will be available for DHCP.
+	AllocationPools []AllocationPool `json:"allocation_pools,omitempty"`
+
+	// GatewayIP sets gateway information for the subnet. Setting to nil will
+	// cause a default gateway to automatically be created. Setting to an empty
+	// string will cause the subnet to be created with no gateway. Setting to
+	// an explicit address will set that address as the gateway.
+	GatewayIP *string `json:"gateway_ip,omitempty"`
+
+	// IPVersion is the IP version for the subnet.
+	IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"`
+
+	// EnableDHCP will either enable or disable the DHCP service.
+	EnableDHCP *bool `json:"enable_dhcp,omitempty"`
+
+	// DNSNameservers are the nameservers to be set via DHCP.
+	DNSNameservers []string `json:"dns_nameservers,omitempty"`
+
+	// HostRoutes are any static host routes to be set via DHCP.
+	HostRoutes []HostRoute `json:"host_routes,omitempty"`
+}
+
+// ToSubnetCreateMap builds a request body from CreateOpts.
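+//
+// Note that an explicitly empty GatewayIP ("") is serialized as a JSON null
+// so that the subnet is created with no gateway. A sketch of the resulting
+// body for that case:
+//
+//	{"subnet": {"network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", "cidr": "192.168.1.0/24", "ip_version": 4, "gateway_ip": null}}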
+func (opts CreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "subnet")
+	if err != nil {
+		return nil, err
+	}
+
+	if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" {
+		m["gateway_ip"] = nil
+	}
+
+	return b, nil
+}
+
+// Create accepts a CreateOpts struct and creates a new subnet using the values
+// provided. You must remember to provide a valid NetworkID, CIDR and IP
+// version.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSubnetCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(createURL(c), b, &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToSubnetUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents the attributes used when updating an existing subnet.
+type UpdateOpts struct {
+	// Name is a human-readable name of the subnet.
+	Name string `json:"name,omitempty"`
+
+	// AllocationPools are IP Address pools that will be available for DHCP.
+	AllocationPools []AllocationPool `json:"allocation_pools,omitempty"`
+
+	// GatewayIP sets gateway information for the subnet. Setting to nil will
+	// leave the existing gateway unchanged. Setting to an empty string will
+	// cause the subnet to be updated with no gateway. Setting to an explicit
+	// address will set that address as the gateway.
+	GatewayIP *string `json:"gateway_ip,omitempty"`
+
+	// DNSNameservers are the nameservers to be set via DHCP.
+	DNSNameservers []string `json:"dns_nameservers,omitempty"`
+
+	// HostRoutes are any static host routes to be set via DHCP.
+	HostRoutes []HostRoute `json:"host_routes,omitempty"`
+
+	// EnableDHCP will either enable or disable the DHCP service.
+	EnableDHCP *bool `json:"enable_dhcp,omitempty"`
+}
+
+// ToSubnetUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToSubnetUpdateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "subnet")
+	if err != nil {
+		return nil, err
+	}
+
+	if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" {
+		m["gateway_ip"] = nil
+	}
+
+	return b, nil
+}
+
+// Update accepts a UpdateOpts struct and updates an existing subnet using the
+// values provided.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToSubnetUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Delete accepts a unique ID and deletes the subnet associated with it.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(deleteURL(c, id), nil)
+	return
+}
+
+// IDFromName is a convenience function that returns a subnet's ID,
+// given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + pages, err := List(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSubnets(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "subnet"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "subnet"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go new file mode 100644 index 00000000000..ade8abc699c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go @@ -0,0 +1,133 @@ +package subnets + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a subnet resource. +func (r commonResult) Extract() (*Subnet, error) { + var s struct { + Subnet *Subnet `json:"subnet"` + } + err := r.ExtractInto(&s) + return s.Subnet, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Subnet. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Subnet. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Subnet. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AllocationPool represents a sub-range of cidr available for dynamic +// allocation to ports, e.g. {Start: "10.0.0.2", End: "10.0.0.254"} +type AllocationPool struct { + Start string `json:"start"` + End string `json:"end"` +} + +// HostRoute represents a route that should be used by devices with IPs from +// a subnet (not including local subnet route). +type HostRoute struct { + DestinationCIDR string `json:"destination"` + NextHop string `json:"nexthop"` +} + +// Subnet represents a subnet. See package documentation for a top-level +// description of what this is. +type Subnet struct { + // UUID representing the subnet. + ID string `json:"id"` + + // UUID of the parent network. + NetworkID string `json:"network_id"` + + // Human-readable name for the subnet. Might not be unique. + Name string `json:"name"` + + // IP version, either `4' or `6'. + IPVersion int `json:"ip_version"` + + // CIDR representing IP range for this subnet, based on IP version. + CIDR string `json:"cidr"` + + // Default gateway used by devices in this subnet. + GatewayIP string `json:"gateway_ip"` + + // DNS name servers used by hosts in this subnet. + DNSNameservers []string `json:"dns_nameservers"` + + // Sub-ranges of CIDR available for dynamic allocation to ports. + // See AllocationPool. + AllocationPools []AllocationPool `json:"allocation_pools"` + + // Routes that should be used by devices with IPs from this subnet + // (not including local subnet route). 
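+	// e.g. HostRoute{DestinationCIDR: "10.20.0.0/24", NextHop: "192.168.1.1"}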
+	HostRoutes []HostRoute `json:"host_routes"`
+
+	// Specifies whether DHCP is enabled for this subnet or not.
+	EnableDHCP bool `json:"enable_dhcp"`
+
+	// Owner of network.
+	TenantID string `json:"tenant_id"`
+}
+
+// SubnetPage is the page returned by a pager when traversing over a collection
+// of subnets.
+type SubnetPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of subnets has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r SubnetPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"subnets_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a SubnetPage struct is empty.
+func (r SubnetPage) IsEmpty() (bool, error) {
+	is, err := ExtractSubnets(r)
+	return len(is) == 0, err
+}
+
+// ExtractSubnets accepts a Page struct, specifically a SubnetPage struct,
+// and extracts the elements into a slice of Subnet structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractSubnets(r pagination.Page) ([]Subnet, error) {
+	var s struct {
+		Subnets []Subnet `json:"subnets"`
+	}
+	err := (r.(SubnetPage)).ExtractInto(&s)
+	return s.Subnets, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go
new file mode 100644
index 00000000000..7a4f2f7dd4c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go
@@ -0,0 +1,31 @@
+package subnets
+
+import "github.com/gophercloud/gophercloud"
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("subnets", id)
+}
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("subnets")
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go
new file mode 100644
index 00000000000..0fa1c083a26
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go
@@ -0,0 +1,29 @@
+/*
+Package accounts contains functionality for working with Object Storage
+account resources. An account is the top-level resource in the object storage
+hierarchy: containers belong to accounts, objects belong to containers.
+
+Another way of thinking of an account is like a namespace for all your
+resources. It is synonymous with a project or tenant in other OpenStack
+services.
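+
+Example to Get Only an Account's Metadata
+
+A minimal sketch: the result of Get also offers ExtractMetadata, which
+returns just the custom X-Account-Meta- values as a map (an authenticated
+objectStorageClient is assumed):
+
+	metadata, err := accounts.Get(objectStorageClient, nil).ExtractMetadata()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", metadata)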
+ +Example to Get an Account + + account, err := accounts.Get(objectStorageClient, nil).Extract() + fmt.Printf("%+v\n", account) + +Example to Update an Account + + metadata := map[string]string{ + "some": "metadata", + } + + updateOpts := accounts.UpdateOpts{ + Metadata: metadata, + } + + updateResult, err := accounts.Update(objectStorageClient, updateOpts).Extract() + fmt.Printf("%+v\n", updateResult) + +*/ +package accounts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go new file mode 100644 index 00000000000..df215878534 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go @@ -0,0 +1,100 @@ +package accounts + +import "github.com/gophercloud/gophercloud" + +// GetOptsBuilder allows extensions to add additional headers to the Get +// request. +type GetOptsBuilder interface { + ToAccountGetMap() (map[string]string, error) +} + +// GetOpts is a structure that contains parameters for getting an account's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToAccountGetMap formats a GetOpts into a map[string]string of headers. +func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves an account's metadata. To extract just the +// custom metadata, call the ExtractMetadata method on the GetResult. To extract +// all the headers that are returned (including the metadata), call the +// Extract method on the GetResult. +func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountGetMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("HEAD", getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional headers to the Update +// request. +type UpdateOptsBuilder interface { + ToAccountUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that contains parameters for updating, creating, or +// deleting an account's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` +} + +// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. +func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { + headers, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + headers["X-Account-Meta-"+k] = v + } + return headers, err +} + +// Update is a function that creates, updates, or deletes an account's metadata. +// To extract the headers returned, call the Extract method on the UpdateResult. 
+func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) (r UpdateResult) {
+	h := make(map[string]string)
+	if opts != nil {
+		headers, err := opts.ToAccountUpdateMap()
+		if err != nil {
+			r.Err = err
+			return
+		}
+		for k, v := range headers {
+			h[k] = v
+		}
+	}
+	resp, err := c.Request("POST", updateURL(c), &gophercloud.RequestOpts{
+		MoreHeaders: h,
+		OkCodes:     []int{201, 202, 204},
+	})
+	if resp != nil {
+		r.Header = resp.Header
+	}
+	r.Err = err
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go
new file mode 100644
index 00000000000..bf5dc846fc3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go
@@ -0,0 +1,167 @@
+package accounts
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// UpdateResult is returned from a call to the Update function.
+type UpdateResult struct {
+	gophercloud.HeaderResult
+}
+
+// UpdateHeader represents the headers returned in the response from an Update
+// request.
+type UpdateHeader struct {
+	ContentLength int64     `json:"-"`
+	ContentType   string    `json:"Content-Type"`
+	TransID       string    `json:"X-Trans-Id"`
+	Date          time.Time `json:"-"`
+}
+
+func (r *UpdateHeader) UnmarshalJSON(b []byte) error {
+	type tmp UpdateHeader
+	var s struct {
+		tmp
+		ContentLength string                  `json:"Content-Length"`
+		Date          gophercloud.JSONRFC1123 `json:"Date"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = UpdateHeader(s.tmp)
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	r.Date = time.Time(s.Date)
+
+	return err
+}
+
+// Extract will return a struct of headers returned from a call to Update.
+func (r UpdateResult) Extract() (*UpdateHeader, error) {
+	var s *UpdateHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// GetHeader represents the headers returned in the response from a Get request.
+type GetHeader struct {
+	BytesUsed      int64     `json:"-"`
+	ContainerCount int64     `json:"-"`
+	ContentLength  int64     `json:"-"`
+	ObjectCount    int64     `json:"-"`
+	ContentType    string    `json:"Content-Type"`
+	TransID        string    `json:"X-Trans-Id"`
+	TempURLKey     string    `json:"X-Account-Meta-Temp-URL-Key"`
+	TempURLKey2    string    `json:"X-Account-Meta-Temp-URL-Key-2"`
+	Date           time.Time `json:"-"`
+}
+
+func (r *GetHeader) UnmarshalJSON(b []byte) error {
+	type tmp GetHeader
+	var s struct {
+		tmp
+		BytesUsed      string `json:"X-Account-Bytes-Used"`
+		ContentLength  string `json:"Content-Length"`
+		ContainerCount string `json:"X-Account-Container-Count"`
+		ObjectCount    string `json:"X-Account-Object-Count"`
+		Date           string `json:"Date"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = GetHeader(s.tmp)
+
+	switch s.BytesUsed {
+	case "":
+		r.BytesUsed = 0
+	default:
+		r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	switch s.ObjectCount {
+	case "":
+		r.ObjectCount = 0
+	default:
+		r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	switch s.ContainerCount {
+	case "":
+		r.ContainerCount = 0
+	default:
+		r.ContainerCount, err = strconv.ParseInt(s.ContainerCount, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	if s.Date != "" {
+		r.Date, err = time.Parse(time.RFC1123, s.Date)
+	}
+
+	return err
+}
+
+// GetResult is returned from a call to the Get function.
+type GetResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Get.
+func (r GetResult) Extract() (*GetHeader, error) {
+	var s *GetHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// ExtractMetadata is a function that takes a GetResult and returns the
+// custom metadata associated with the account.
+func (r GetResult) ExtractMetadata() (map[string]string, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+
+	metadata := make(map[string]string)
+	for k, v := range r.Header {
+		if strings.HasPrefix(k, "X-Account-Meta-") {
+			key := strings.TrimPrefix(k, "X-Account-Meta-")
+			metadata[key] = v[0]
+		}
+	}
+	return metadata, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go
new file mode 100644
index 00000000000..71540b1daf3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go
@@ -0,0 +1,11 @@
+package accounts
+
+import "github.com/gophercloud/gophercloud"
+
+func getURL(c *gophercloud.ServiceClient) string {
+	return c.Endpoint
+}
+
+func updateURL(c *gophercloud.ServiceClient) string {
+	return getURL(c)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go
new file mode 100644
index 00000000000..1ac8504de72
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go
@@ -0,0 +1,88 @@
+/*
+Package containers contains functionality for working with Object Storage
+container resources. A container serves as a logical namespace for objects
A container serves as a logical namespace for objects +that are placed inside it - an object with the same name in two different +containers represents two different objects. + +In addition to containing objects, you can also use the container to control +access to objects by using an access control list (ACL). + +Example to List Containers + + listOpts := containers.ListOpts{ + Full: true, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractInfo(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to List Only Container Names + + listOpts := containers.ListOpts{ + Full: false, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractNames(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to Create a Container + + createOpts := containers.CreateOpts{ + ContentType: "application/json", + Metadata: map[string]string{ + "foo": "bar", + }, + } + + container, err := containers.Create(objectStorageClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Container + + containerName := "my_container" + + updateOpts := containers.UpdateOpts{ + Metadata: map[string]string{ + "bar": "baz", + }, + } + + container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Container + + containerName := "my_container" + + container, err := containers.Delete(objectStorageClient, containerName).Extract() + if err != nil { + panic(err) + } +*/ +package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go new file mode 100644 index 00000000000..ecb76075b11 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -0,0 +1,191 @@ +package containers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToContainerListParams() (bool, string, error) +} + +// ListOpts is a structure that holds options for listing containers. +type ListOpts struct { + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` +} + +// ToContainerListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each container. +func (opts ListOpts) ToContainerListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves containers associated with the account as +// well as account metadata. It returns a pager which can be iterated with the +// EachPage function. 
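+//
+// A minimal sketch (illustrative, not part of the vendored source) of
+// iterating the pager page by page; it assumes an authenticated
+// objectStorageClient:
+//
+//	err := List(objectStorageClient, ListOpts{Full: false}).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			names, err := ExtractNames(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			fmt.Println(names)
+//			return true, nil
+//		})
+//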
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c) + if opts != nil { + full, query, err := opts.ToContainerListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ContainerPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToContainerCreateMap() (map[string]string, error) +} + +// CreateOpts is a structure that holds parameters for creating a container. +type CreateOpts struct { + Metadata map[string]string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + IfNoneMatch string `h:"If-None-Match"` + VersionsLocation string `h:"X-Versions-Location"` +} + +// ToContainerCreateMap formats a CreateOpts into a map of headers. +func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Create is a function that creates a new container. +func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerCreateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("PUT", createURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// Delete is a function that deletes a container. +func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, containerName), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToContainerUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting a container's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + RemoveVersionsLocation string `h:"X-Remove-Versions-Location"` + VersionsLocation string `h:"X-Versions-Location"` +} + +// ToContainerUpdateMap formats a UpdateOpts into a map of headers. 
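+//
+// For illustration only (not upstream code): metadata keys gain the
+// X-Container-Meta- prefix in the resulting header map, e.g.
+//
+//	opts := UpdateOpts{Metadata: map[string]string{"foo": "bar"}}
+//	h, _ := opts.ToContainerUpdateMap()
+//	// h["X-Container-Meta-foo"] == "bar"
+//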
+func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes a container's +// metadata. +func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// Get is a function that retrieves the metadata of a container. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName string) (r GetResult) { + resp, err := c.Request("HEAD", getURL(c, containerName), &gophercloud.RequestOpts{ + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go new file mode 100644 index 00000000000..87682c885b3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go @@ -0,0 +1,342 @@ +package containers + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Container represents a container resource. +type Container struct { + // The total number of bytes stored in the container. + Bytes int64 `json:"bytes"` + + // The total number of objects stored in the container. + Count int64 `json:"count"` + + // The name of the container. + Name string `json:"name"` +} + +// ContainerPage is the page returned by a pager when traversing over a +// collection of containers. +type ContainerPage struct { + pagination.MarkerPageBase +} + +//IsEmpty returns true if a ListResult contains no container names. +func (r ContainerPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last container name in a ListResult. +func (r ContainerPage) LastMarker() (string, error) { + names, err := ExtractNames(r) + if err != nil { + return "", err + } + if len(names) == 0 { + return "", nil + } + return names[len(names)-1], nil +} + +// ExtractInfo is a function that takes a ListResult and returns the +// containers' information. +func ExtractInfo(r pagination.Page) ([]Container, error) { + var s []Container + err := (r.(ContainerPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a ListResult and returns the +// containers' names. 
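+//
+// An illustrative sketch (not from the vendored source), assuming an
+// authenticated objectStorageClient:
+//
+//	allPages, err := List(objectStorageClient, nil).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	names, err := ExtractNames(allPages)
+//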
+func ExtractNames(page pagination.Page) ([]string, error) { + casted := page.(ContainerPage) + ct := casted.Header.Get("Content-Type") + + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(page) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, container := range parsed { + names = append(names, container.Name) + } + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(page.(ContainerPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + BytesUsed int64 `json:"-"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ObjectCount int64 `json:"-"` + Read []string `json:"-"` + TransID string `json:"X-Trans-Id"` + VersionsLocation string `json:"X-Versions-Location"` + Write []string `json:"-"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Container-Bytes-Used"` + ContentLength string `json:"Content-Length"` + ObjectCount string `json:"X-Container-Object-Count"` + Write string `json:"X-Container-Write"` + Read string `json:"X-Container-Read"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + r.Read = strings.Split(s.Read, ",") + r.Write = strings.Split(s.Write, ",") + + r.Date = time.Time(s.Date) + + return err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the container. +func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Container-Meta-") { + key := strings.TrimPrefix(k, "X-Container-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a Create +// request. 
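+//
+// A hypothetical usage sketch (not upstream code): the header struct is
+// obtained by calling Extract on a CreateResult, assuming an authenticated
+// objectStorageClient:
+//
+//	createHeader, err := Create(objectStorageClient, "my_container", nil).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(createHeader.TransID)
+//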
+type CreateHeader struct {
+	ContentLength int64     `json:"-"`
+	ContentType   string    `json:"Content-Type"`
+	Date          time.Time `json:"-"`
+	TransID       string    `json:"X-Trans-Id"`
+}
+
+func (r *CreateHeader) UnmarshalJSON(b []byte) error {
+	type tmp CreateHeader
+	var s struct {
+		tmp
+		ContentLength string                  `json:"Content-Length"`
+		Date          gophercloud.JSONRFC1123 `json:"Date"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = CreateHeader(s.tmp)
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	r.Date = time.Time(s.Date)
+
+	return err
+}
+
+// CreateResult represents the result of a create operation. To extract the
+// headers from the HTTP response, call its Extract method.
+type CreateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Create.
+func (r CreateResult) Extract() (*CreateHeader, error) {
+	var s *CreateHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// UpdateHeader represents the headers returned in the response from an Update
+// request.
+type UpdateHeader struct {
+	ContentLength int64     `json:"-"`
+	ContentType   string    `json:"Content-Type"`
+	Date          time.Time `json:"-"`
+	TransID       string    `json:"X-Trans-Id"`
+}
+
+func (r *UpdateHeader) UnmarshalJSON(b []byte) error {
+	type tmp UpdateHeader
+	var s struct {
+		tmp
+		ContentLength string                  `json:"Content-Length"`
+		Date          gophercloud.JSONRFC1123 `json:"Date"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = UpdateHeader(s.tmp)
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	r.Date = time.Time(s.Date)
+
+	return err
+}
+
+// UpdateResult represents the result of an update operation. To extract the
+// headers from the HTTP response, call its Extract method.
+type UpdateResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Update.
+func (r UpdateResult) Extract() (*UpdateHeader, error) {
+	var s *UpdateHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// DeleteHeader represents the headers returned in the response from a Delete
+// request.
+type DeleteHeader struct {
+	ContentLength int64     `json:"-"`
+	ContentType   string    `json:"Content-Type"`
+	Date          time.Time `json:"-"`
+	TransID       string    `json:"X-Trans-Id"`
+}
+
+func (r *DeleteHeader) UnmarshalJSON(b []byte) error {
+	type tmp DeleteHeader
+	var s struct {
+		tmp
+		ContentLength string                  `json:"Content-Length"`
+		Date          gophercloud.JSONRFC1123 `json:"Date"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = DeleteHeader(s.tmp)
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	r.Date = time.Time(s.Date)
+
+	return err
+}
+
+// DeleteResult represents the result of a delete operation. To extract the
+// headers from the HTTP response, call its Extract method.
+type DeleteResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Delete.
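+//
+// An illustrative sketch (not part of the vendored source); assumes an
+// authenticated objectStorageClient:
+//
+//	deleteHeader, err := Delete(objectStorageClient, "my_container").Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = deleteHeader
+//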
+func (r DeleteResult) Extract() (*DeleteHeader, error) {
+	var s *DeleteHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go
new file mode 100644
index 00000000000..9b380470dd7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go
@@ -0,0 +1,23 @@
+package containers
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return c.Endpoint
+}
+
+func createURL(c *gophercloud.ServiceClient, container string) string {
+	return c.ServiceURL(container)
+}
+
+func getURL(c *gophercloud.ServiceClient, container string) string {
+	return createURL(c, container)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, container string) string {
+	return createURL(c, container)
+}
+
+func updateURL(c *gophercloud.ServiceClient, container string) string {
+	return createURL(c, container)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go
new file mode 100644
index 00000000000..1e02430fb42
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go
@@ -0,0 +1,102 @@
+/*
+Package objects contains functionality for working with Object Storage
+object resources. An object is a resource that represents and contains data
+- such as documents, images, and so on. You can also store custom metadata
+with an object.
+
+Example to List Objects
+
+	containerName := "my_container"
+
+	listOpts := objects.ListOpts{
+		Full: true,
+	}
+
+	allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allObjects, err := objects.ExtractInfo(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, object := range allObjects {
+		fmt.Printf("%+v\n", object)
+	}
+
+Example to List Object Names
+
+	containerName := "my_container"
+
+	listOpts := objects.ListOpts{
+		Full: false,
+	}
+
+	allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allObjects, err := objects.ExtractNames(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, object := range allObjects {
+		fmt.Printf("%+v\n", object)
+	}
+
+Example to Create an Object
+
+	content := "some object content"
+	objectName := "my_object"
+	containerName := "my_container"
+
+	createOpts := objects.CreateOpts{
+		ContentType: "text/plain",
+		Content:     strings.NewReader(content),
+	}
+
+	object, err := objects.Create(objectStorageClient, containerName, objectName, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Copy an Object
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	copyOpts := objects.CopyOpts{
+		Destination: "/newContainer/newObject",
+	}
+
+	object, err := objects.Copy(objectStorageClient, containerName, objectName, copyOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Object
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	object, err := objects.Delete(objectStorageClient, containerName, objectName, nil).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Download an Object's Data
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	object := objects.Download(objectStorageClient, containerName, objectName, nil)
+	content, err := object.ExtractContent()
+	if err != nil {
+		panic(err)
+	}
+*/
+package objects
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
new file mode 100644
index 00000000000..5c4ae44d317
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
@@ -0,0 +1,13 @@
+package objects
+
+import "github.com/gophercloud/gophercloud"
+
+// ErrWrongChecksum is the error when the checksum generated for an object
+// doesn't match the ETag header.
+type ErrWrongChecksum struct {
+	gophercloud.BaseError
+}
+
+func (e ErrWrongChecksum) Error() string {
+	return "Local checksum does not match API ETag header"
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
new file mode 100644
index 00000000000..f67bfd15904
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
@@ -0,0 +1,461 @@
+package objects
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"crypto/sha1"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToObjectListParams() (bool, string, error)
+}
+
+// ListOpts is a structure that holds parameters for listing objects.
+type ListOpts struct {
+	// Full is a true/false value that represents the amount of object information
+	// returned. If Full is set to true, then the content-type, number of bytes,
+	// hash, date last modified, and name are returned. If set to false or not set,
+	// then only the object names are returned.
+	Full      bool
+	Limit     int    `q:"limit"`
+	Marker    string `q:"marker"`
+	EndMarker string `q:"end_marker"`
+	Format    string `q:"format"`
+	Prefix    string `q:"prefix"`
+	Delimiter string `q:"delimiter"`
+	Path      string `q:"path"`
+}
+
+// ToObjectListParams formats a ListOpts into a query string and boolean
+// representing whether to list complete information for each object.
+func (opts ListOpts) ToObjectListParams() (bool, string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return opts.Full, q.String(), err
+}
+
+// List is a function that retrieves all objects in a container. It also returns
+// the details for the container. To extract only the object information or names,
+// pass the ListResult response to the ExtractInfo or ExtractNames function,
+// respectively.
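+//
+// A minimal sketch (illustrative, not upstream code) of paging through the
+// names in a container, assuming an authenticated objectStorageClient:
+//
+//	err := List(objectStorageClient, "my_container", ListOpts{Full: false}).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			names, err := ExtractNames(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			fmt.Println(names)
+//			return true, nil
+//		})
+//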
+func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c, containerName) + if opts != nil { + full, query, err := opts.ToObjectListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// DownloadOptsBuilder allows extensions to add additional parameters to the +// Download request. +type DownloadOptsBuilder interface { + ToObjectDownloadParams() (map[string]string, string, error) +} + +// DownloadOpts is a structure that holds parameters for downloading an object. +type DownloadOpts struct { + IfMatch string `h:"If-Match"` + IfModifiedSince time.Time `h:"If-Modified-Since"` + IfNoneMatch string `h:"If-None-Match"` + IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` + Range string `h:"Range"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectDownloadParams formats a DownloadOpts into a query string and map of +// headers. +func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Download is a function that retrieves the content and metadata for an object. +// To extract just the content, pass the DownloadResult response to the +// ExtractContent function. +func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) (r DownloadResult) { + url := downloadURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectDownloadParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 304}, + }) + if resp != nil { + r.Header = resp.Header + r.Body = resp.Body + } + r.Err = err + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToObjectCreateParams() (io.Reader, map[string]string, string, error) +} + +// CreateOpts is a structure that holds parameters for creating an object. 
+type CreateOpts struct {
+	Content            io.Reader
+	Metadata           map[string]string
+	CacheControl       string `h:"Cache-Control"`
+	ContentDisposition string `h:"Content-Disposition"`
+	ContentEncoding    string `h:"Content-Encoding"`
+	ContentLength      int64  `h:"Content-Length"`
+	ContentType        string `h:"Content-Type"`
+	CopyFrom           string `h:"X-Copy-From"`
+	DeleteAfter        int    `h:"X-Delete-After"`
+	DeleteAt           int    `h:"X-Delete-At"`
+	DetectContentType  string `h:"X-Detect-Content-Type"`
+	ETag               string `h:"ETag"`
+	IfNoneMatch        string `h:"If-None-Match"`
+	ObjectManifest     string `h:"X-Object-Manifest"`
+	TransferEncoding   string `h:"Transfer-Encoding"`
+	Expires            string `q:"expires"`
+	MultipartManifest  string `q:"multipart-manifest"`
+	Signature          string `q:"signature"`
+}
+
+// ToObjectCreateParams formats a CreateOpts into a query string and map of
+// headers.
+func (opts CreateOpts) ToObjectCreateParams() (io.Reader, map[string]string, string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return nil, nil, "", err
+	}
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, nil, "", err
+	}
+
+	for k, v := range opts.Metadata {
+		h["X-Object-Meta-"+k] = v
+	}
+
+	hash := md5.New()
+	buf := bytes.NewBuffer([]byte{})
+	_, err = io.Copy(io.MultiWriter(hash, buf), opts.Content)
+	if err != nil {
+		return nil, nil, "", err
+	}
+	localChecksum := fmt.Sprintf("%x", hash.Sum(nil))
+	h["ETag"] = localChecksum
+
+	return buf, h, q.String(), nil
+}
+
+// Create is a function that creates a new object or replaces an existing
+// object. The object's content is buffered locally so that an MD5 checksum
+// can be computed and sent in the ETag header, allowing the server to verify
+// the integrity of the uploaded data.
+func Create(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateOptsBuilder) (r CreateResult) {
+	url := createURL(c, containerName, objectName)
+	h := make(map[string]string)
+	var b io.Reader
+	if opts != nil {
+		tmpB, headers, query, err := opts.ToObjectCreateParams()
+		if err != nil {
+			r.Err = err
+			return
+		}
+		for k, v := range headers {
+			h[k] = v
+		}
+		url += query
+		b = tmpB
+	}
+
+	resp, err := c.Put(url, nil, nil, &gophercloud.RequestOpts{
+		RawBody:     b,
+		MoreHeaders: h,
+	})
+	r.Err = err
+	if resp != nil {
+		r.Header = resp.Header
+	}
+	return
+}
+
+// CopyOptsBuilder allows extensions to add additional parameters to the
+// Copy request.
+type CopyOptsBuilder interface {
+	ToObjectCopyMap() (map[string]string, error)
+}
+
+// CopyOpts is a structure that holds parameters for copying one object to
+// another.
+type CopyOpts struct {
+	Metadata           map[string]string
+	ContentDisposition string `h:"Content-Disposition"`
+	ContentEncoding    string `h:"Content-Encoding"`
+	ContentType        string `h:"Content-Type"`
+	Destination        string `h:"Destination" required:"true"`
+}
+
+// ToObjectCopyMap formats a CopyOpts into a map of headers.
+func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range opts.Metadata {
+		h["X-Object-Meta-"+k] = v
+	}
+	return h, nil
+}
+
+// Copy is a function that copies one object to another.
+func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) (r CopyResult) { + h := make(map[string]string) + headers, err := opts.ToObjectCopyMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + + url := copyURL(c, containerName, objectName) + resp, err := c.Request("COPY", url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToObjectDeleteQuery() (string, error) +} + +// DeleteOpts is a structure that holds parameters for deleting an object. +type DeleteOpts struct { + MultipartManifest string `q:"multipart-manifest"` +} + +// ToObjectDeleteQuery formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete is a function that deletes an object. +func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) (r DeleteResult) { + url := deleteURL(c, containerName, objectName) + if opts != nil { + query, err := opts.ToObjectDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + resp, err := c.Delete(url, nil) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the +// Get request. +type GetOptsBuilder interface { + ToObjectGetQuery() (string, error) +} + +// GetOpts is a structure that holds parameters for getting an object's +// metadata. +type GetOpts struct { + Expires string `q:"expires"` + Signature string `q:"signature"` +} + +// ToObjectGetQuery formats a GetOpts into a query string. +func (opts GetOpts) ToObjectGetQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Get is a function that retrieves the metadata of an object. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) (r GetResult) { + url := getURL(c, containerName, objectName) + if opts != nil { + query, err := opts.ToObjectGetQuery() + if err != nil { + r.Err = err + return + } + url += query + } + resp, err := c.Request("HEAD", url, &gophercloud.RequestOpts{ + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToObjectUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting an object's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType bool `h:"X-Detect-Content-Type"` +} + +// ToObjectUpdateMap formats a UpdateOpts into a map of headers. 
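+//
+// For illustration only (not upstream code): metadata keys gain the
+// X-Object-Meta- prefix in the resulting header map, e.g.
+//
+//	opts := UpdateOpts{Metadata: map[string]string{"color": "blue"}}
+//	h, _ := opts.ToObjectUpdateMap()
+//	// h["X-Object-Meta-color"] == "blue"
+//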
+func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes an object's metadata. +func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToObjectUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + url := updateURL(c, containerName, objectName) + resp, err := c.Post(url, nil, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// HTTPMethod represents an HTTP method string (e.g. "GET"). +type HTTPMethod string + +var ( + // GET represents an HTTP "GET" method. + GET HTTPMethod = "GET" + + // POST represents an HTTP "POST" method. + POST HTTPMethod = "POST" +) + +// CreateTempURLOpts are options for creating a temporary URL for an object. +type CreateTempURLOpts struct { + // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. + // Valid values are "GET" and "POST". + Method HTTPMethod + + // (REQUIRED) TTL is the number of seconds the temp URL should be active. + TTL int + + // (Optional) Split is the string on which to split the object URL. Since only + // the object path is used in the hash, the object URL needs to be parsed. If + // empty, the default OpenStack URL split point will be used ("/v1/"). + Split string +} + +// CreateTempURL is a function for creating a temporary URL for an object. It +// allows users to have "GET" or "POST" access to a particular tenant's object +// for a limited amount of time. +func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { + if opts.Split == "" { + opts.Split = "/v1/" + } + duration := time.Duration(opts.TTL) * time.Second + expiry := time.Now().Add(duration).Unix() + getHeader, err := accounts.Get(c, nil).Extract() + if err != nil { + return "", err + } + secretKey := []byte(getHeader.TempURLKey) + url := getURL(c, containerName, objectName) + splitPath := strings.Split(url, opts.Split) + baseURL, objectPath := splitPath[0], splitPath[1] + objectPath = opts.Split + objectPath + body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) + hash := hmac.New(sha1.New, secretKey) + hash.Write([]byte(body)) + hexsum := fmt.Sprintf("%x", hash.Sum(nil)) + return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go new file mode 100644 index 00000000000..f19b8f4aa55 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go @@ -0,0 +1,496 @@ +package objects + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Object is a structure that holds information related to a storage object. +type Object struct { + // Bytes is the total number of bytes that comprise the object. 
+	Bytes int64 `json:"bytes"`
+
+	// ContentType is the content type of the object.
+	ContentType string `json:"content_type"`
+
+	// Hash represents the MD5 checksum value of the object's content.
+	Hash string `json:"hash"`
+
+	// LastModified is the time the object was last modified.
+	LastModified time.Time `json:"-"`
+
+	// Name is the unique name for the object.
+	Name string `json:"name"`
+}
+
+func (r *Object) UnmarshalJSON(b []byte) error {
+	type tmp Object
+	var s *struct {
+		tmp
+		LastModified gophercloud.JSONRFC3339MilliNoZ `json:"last_modified"`
+	}
+
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = Object(s.tmp)
+
+	r.LastModified = time.Time(s.LastModified)
+
+	return nil
+}
+
+// ObjectPage is a single page of objects that is returned from a call to the
+// List function.
+type ObjectPage struct {
+	pagination.MarkerPageBase
+}
+
+// IsEmpty returns true if a ListResult contains no object names.
+func (r ObjectPage) IsEmpty() (bool, error) {
+	names, err := ExtractNames(r)
+	return len(names) == 0, err
+}
+
+// LastMarker returns the last object name in a ListResult.
+func (r ObjectPage) LastMarker() (string, error) {
+	names, err := ExtractNames(r)
+	if err != nil {
+		return "", err
+	}
+	if len(names) == 0 {
+		return "", nil
+	}
+	return names[len(names)-1], nil
+}
+
+// ExtractInfo is a function that takes a page of objects and returns their
+// full information.
+func ExtractInfo(r pagination.Page) ([]Object, error) {
+	var s []Object
+	err := (r.(ObjectPage)).ExtractInto(&s)
+	return s, err
+}
+
+// ExtractNames is a function that takes a page of objects and returns only
+// their names.
+func ExtractNames(r pagination.Page) ([]string, error) {
+	casted := r.(ObjectPage)
+	ct := casted.Header.Get("Content-Type")
+	switch {
+	case strings.HasPrefix(ct, "application/json"):
+		parsed, err := ExtractInfo(r)
+		if err != nil {
+			return nil, err
+		}
+
+		names := make([]string, 0, len(parsed))
+		for _, object := range parsed {
+			names = append(names, object.Name)
+		}
+
+		return names, nil
+	case strings.HasPrefix(ct, "text/plain"):
+		names := make([]string, 0, 50)
+
+		body := string(r.(ObjectPage).Body.([]uint8))
+		for _, name := range strings.Split(body, "\n") {
+			if len(name) > 0 {
+				names = append(names, name)
+			}
+		}
+
+		return names, nil
+	case strings.HasPrefix(ct, "text/html"):
+		return []string{}, nil
+	default:
+		return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct)
+	}
+}
+
+// DownloadHeader represents the headers returned in the response from a
+// Download request.
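+//
+// An illustrative sketch (not from the vendored source), assuming an
+// authenticated objectStorageClient:
+//
+//	dr := Download(objectStorageClient, "my_container", "my_object", nil)
+//	downloadHeader, err := dr.Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(downloadHeader.ContentLength)
+//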
+type DownloadHeader struct {
+	AcceptRanges       string    `json:"Accept-Ranges"`
+	ContentDisposition string    `json:"Content-Disposition"`
+	ContentEncoding    string    `json:"Content-Encoding"`
+	ContentLength      int64     `json:"-"`
+	ContentType        string    `json:"Content-Type"`
+	Date               time.Time `json:"-"`
+	DeleteAt           time.Time `json:"-"`
+	ETag               string    `json:"Etag"`
+	LastModified       time.Time `json:"-"`
+	ObjectManifest     string    `json:"X-Object-Manifest"`
+	StaticLargeObject  bool      `json:"X-Static-Large-Object"`
+	TransID            string    `json:"X-Trans-Id"`
+}
+
+func (r *DownloadHeader) UnmarshalJSON(b []byte) error {
+	type tmp DownloadHeader
+	var s struct {
+		tmp
+		ContentLength string                  `json:"Content-Length"`
+		Date          gophercloud.JSONRFC1123 `json:"Date"`
+		DeleteAt      gophercloud.JSONUnix    `json:"X-Delete-At"`
+		LastModified  gophercloud.JSONRFC1123 `json:"Last-Modified"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = DownloadHeader(s.tmp)
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	r.Date = time.Time(s.Date)
+	r.DeleteAt = time.Time(s.DeleteAt)
+	r.LastModified = time.Time(s.LastModified)
+
+	return nil
+}
+
+// DownloadResult is the result returned from a call to the Download function.
+type DownloadResult struct {
+	gophercloud.HeaderResult
+	Body io.ReadCloser
+}
+
+// Extract will return a struct of headers returned from a call to Download.
+func (r DownloadResult) Extract() (*DownloadHeader, error) {
+	var s *DownloadHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// ExtractContent is a function that takes a DownloadResult's io.Reader body
+// and reads all available data into a slice of bytes. Please be aware that,
+// because io.Reader is forward-only, the body can only be read once and not
+// rewound. You can recreate a reader from the output of this function by
+// using bytes.NewReader(downloadBytes).
+func (r *DownloadResult) ExtractContent() ([]byte, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, err
+	}
+	return body, nil
+}
+
+// GetHeader represents the headers returned in the response from a Get request.
+type GetHeader struct { + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"X-Static-Large-Object"` + TransID string `json:"X-Trans-Id"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// GetResult is a *http.Response that is returned from a call to the Get +// function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the object. +func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Object-Meta-") { + key := strings.TrimPrefix(k, "X-Object-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a +// Create request. +type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + checksum string + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. +func (r CreateResult) Extract() (*CreateHeader, error) { + //if r.Header.Get("ETag") != fmt.Sprintf("%x", localChecksum) { + // return nil, ErrWrongChecksum{} + //} + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a +// Update request. 
+type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a +// Delete request. +type DeleteHeader struct { + ContentLength int64 `json:"Content-Length"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DeleteHeader) UnmarshalJSON(b []byte) error { + type tmp DeleteHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DeleteHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Delete. +func (r DeleteResult) Extract() (*DeleteHeader, error) { + var s *DeleteHeader + err := r.ExtractInto(&s) + return s, err +} + +// CopyHeader represents the headers returned in the response from a +// Copy request. +type CopyHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + CopiedFrom string `json:"X-Copied-From"` + CopiedFromLastModified time.Time `json:"-"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CopyHeader) UnmarshalJSON(b []byte) error { + type tmp CopyHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CopyHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CopyResult represents the result of a copy operation. +type CopyResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Copy. 
+func (r CopyResult) Extract() (*CopyHeader, error) {
+	var s *CopyHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go
new file mode 100644
index 00000000000..b3ac304b742
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go
@@ -0,0 +1,33 @@
+package objects
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+func listURL(c *gophercloud.ServiceClient, container string) string {
+	return c.ServiceURL(container)
+}
+
+func copyURL(c *gophercloud.ServiceClient, container, object string) string {
+	return c.ServiceURL(container, object)
+}
+
+func createURL(c *gophercloud.ServiceClient, container, object string) string {
+	return copyURL(c, container, object)
+}
+
+func getURL(c *gophercloud.ServiceClient, container, object string) string {
+	return copyURL(c, container, object)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, container, object string) string {
+	return copyURL(c, container, object)
+}
+
+func downloadURL(c *gophercloud.ServiceClient, container, object string) string {
+	return copyURL(c, container, object)
+}
+
+func updateURL(c *gophercloud.ServiceClient, container, object string) string {
+	return copyURL(c, container, object)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go
new file mode 100644
index 00000000000..989dc4ece2b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go
@@ -0,0 +1,16 @@
+/*
+Package swauth implements Swift's built-in authentication.
+
+Example to Authenticate with swauth
+
+	authOpts := swauth.AuthOpts{
+		User: "project:user",
+		Key:  "password",
+	}
+
+	swiftClient, err := swauth.NewObjectStorageV1(providerClient, authOpts)
+	if err != nil {
+		panic(err)
+	}
+*/
+package swauth
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go
new file mode 100644
index 00000000000..29bdcbcf76f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go
@@ -0,0 +1,70 @@
+package swauth
+
+import "github.com/gophercloud/gophercloud"
+
+// AuthOptsBuilder describes struct types that can be accepted by the Auth call.
+type AuthOptsBuilder interface {
+	ToAuthOptsMap() (map[string]string, error)
+}
+
+// AuthOpts specifies an authentication request.
+type AuthOpts struct {
+	// User is a Swauth-based username in username:tenant format.
+	User string `h:"X-Auth-User" required:"true"`
+
+	// Key is a secret/password to authenticate the User with.
+	Key string `h:"X-Auth-Key" required:"true"`
+}
+
+// ToAuthOptsMap formats an AuthOpts structure into a request body.
+func (opts AuthOpts) ToAuthOptsMap() (map[string]string, error) {
+	return gophercloud.BuildHeaders(opts)
+}
+
+// Auth performs an authentication request for a Swauth-based user.
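+//
+// A minimal sketch (illustrative, not upstream code), assuming a
+// *gophercloud.ProviderClient named providerClient:
+//
+//	authResult, err := Auth(providerClient, AuthOpts{
+//		User: "project:user",
+//		Key:  "password",
+//	}).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(authResult.StorageURL)
+//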
+func Auth(c *gophercloud.ProviderClient, opts AuthOptsBuilder) (r GetAuthResult) { + h := make(map[string]string) + + if opts != nil { + headers, err := opts.ToAuthOptsMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + + resp, err := c.Request("GET", getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200}, + }) + + if resp != nil { + r.Header = resp.Header + } + + r.Err = err + + return r +} + +// NewObjectStorageV1 creates a Swauth-authenticated *gophercloud.ServiceClient +// client that can issue ObjectStorage-based API calls. +func NewObjectStorageV1(pc *gophercloud.ProviderClient, authOpts AuthOpts) (*gophercloud.ServiceClient, error) { + auth, err := Auth(pc, authOpts).Extract() + if err != nil { + return nil, err + } + + swiftClient := &gophercloud.ServiceClient{ + ProviderClient: pc, + Endpoint: gophercloud.NormalizeURL(auth.StorageURL), + } + + swiftClient.TokenID = auth.Token + + return swiftClient, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go new file mode 100644 index 00000000000..f442f472550 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go @@ -0,0 +1,27 @@ +package swauth + +import ( + "github.com/gophercloud/gophercloud" +) + +// GetAuthResult contains the response from the Auth request. Call its Extract +// method to interpret it as an AuthResult. +type GetAuthResult struct { + gophercloud.HeaderResult +} + +// AuthResult contains the authentication information from a Swauth +// authentication request. +type AuthResult struct { + Token string `json:"X-Auth-Token"` + StorageURL string `json:"X-Storage-Url"` + CDNURL string `json:"X-CDN-Management-Url"` +} + +// Extract is a method that attempts to interpret any Swauth authentication +// response as a AuthResult struct. +func (r GetAuthResult) Extract() (*AuthResult, error) { + var s *AuthResult + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go new file mode 100644 index 00000000000..a30cabd60e5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go @@ -0,0 +1,7 @@ +package swauth + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ProviderClient) string { + return c.IdentityBase + "auth/v1.0" +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go new file mode 100644 index 00000000000..27da19f91a8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -0,0 +1,111 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// Version is a supported API version, corresponding to a vN package within the appropriate service. +type Version struct { + ID string + Suffix string + Priority int +} + +var goodStatus = map[string]bool{ + "current": true, + "supported": true, + "stable": true, +} + +// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's +// published versions. 
+// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. +func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + type linkResp struct { + Href string `json:"href"` + Rel string `json:"rel"` + } + + type valueResp struct { + ID string `json:"id"` + Status string `json:"status"` + Links []linkResp `json:"links"` + } + + type versionsResp struct { + Values []valueResp `json:"values"` + } + + type response struct { + Versions versionsResp `json:"versions"` + } + + normalize := func(endpoint string) string { + if !strings.HasSuffix(endpoint, "/") { + return endpoint + "/" + } + return endpoint + } + identityEndpoint := normalize(client.IdentityEndpoint) + + // If a full endpoint is specified, check version suffixes for a match first. + for _, v := range recognized { + if strings.HasSuffix(identityEndpoint, v.Suffix) { + return v, identityEndpoint, nil + } + } + + var resp response + _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + + if err != nil { + return nil, "", err + } + + var highest *Version + var endpoint string + + for _, value := range resp.Versions.Values { + href := "" + for _, link := range value.Links { + if link.Rel == "self" { + href = normalize(link.Href) + } + } + + for _, version := range recognized { + if strings.Contains(value.ID, version.ID) { + // Prefer a version that exactly matches the provided endpoint. + if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return version, href, nil + } + + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || version.Priority > highest.Priority { + highest = version + endpoint = href + } + } + } + } + } + + if highest == nil { + return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + } + if endpoint == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + } + + return highest, endpoint, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go new file mode 100644 index 00000000000..757295c423a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/http.go @@ -0,0 +1,60 @@ +package pagination + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// PageResult stores the HTTP response that returned the current page of results. +type PageResult struct { + gophercloud.Result + url.URL +} + +// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the +// results, interpreting it as JSON if the content type indicates. 
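+//
+// An illustrative sketch (not from the vendored source): given an
+// *http.Response named resp from a paginated API call,
+//
+//	pageResult, err := PageResultFrom(resp)
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(pageResult.URL.String())
+//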
+func PageResultFrom(resp *http.Response) (PageResult, error) { + var parsedBody interface{} + + defer resp.Body.Close() + rawBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PageResult{}, err + } + + if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { + err = json.Unmarshal(rawBody, &parsedBody) + if err != nil { + return PageResult{}, err + } + } else { + parsedBody = rawBody + } + + return PageResultFromParsed(resp, parsedBody), err +} + +// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its +// body parsed as JSON (and closed). +func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { + return PageResult{ + Result: gophercloud.Result{ + Body: body, + Header: resp.Header, + }, + URL: *resp.Request.URL, + } +} + +// Request performs an HTTP request and extracts the http.Response from the result. +func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { + return client.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: headers, + OkCodes: []int{200, 204, 300}, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go new file mode 100644 index 00000000000..3656fb7f8f4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go @@ -0,0 +1,92 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. +type LinkedPageBase struct { + PageResult + + // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. + // If any link along the path is missing, an empty URL will be returned. + // If any link results in an unexpected value type, an error will be returned. + // When left as "nil", []string{"links", "next"} will be used as a default. + LinkPath []string +} + +// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. +// It assumes that the links are available in a "links" element of the top-level response object. +// If this is not the case, override NextPageURL on your result type. +func (current LinkedPageBase) NextPageURL() (string, error) { + var path []string + var key string + + if current.LinkPath == nil { + path = []string{"links", "next"} + } else { + path = current.LinkPath + } + + submap, ok := current.Body.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return "", err + } + + for { + key, path = path[0], path[1:len(path)] + + value, ok := submap[key] + if !ok { + return "", nil + } + + if len(path) > 0 { + submap, ok = value.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + } else { + if value == nil { + // Actual null element. 
+				return "", nil
+			}
+
+			url, ok := value.(string)
+			if !ok {
+				err := gophercloud.ErrUnexpectedType{}
+				err.Expected = "string"
+				err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value))
+				return "", err
+			}
+
+			return url, nil
+		}
+	}
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface.
+func (current LinkedPageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the linked page's body. This method is needed to satisfy the
+// Page interface.
+func (current LinkedPageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go
new file mode 100644
index 00000000000..52e53bae850
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go
@@ -0,0 +1,58 @@
+package pagination
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager.
+// For convenience, embed the MarkerPageBase struct.
+type MarkerPage interface {
+	Page
+
+	// LastMarker returns the last "marker" value on this page.
+	LastMarker() (string, error)
+}
+
+// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters.
+type MarkerPageBase struct {
+	PageResult
+
+	// Owner is a reference to the embedding struct.
+	Owner MarkerPage
+}
+
+// NextPageURL generates the URL for the page of results after this one.
+func (current MarkerPageBase) NextPageURL() (string, error) {
+	currentURL := current.URL
+
+	mark, err := current.Owner.LastMarker()
+	if err != nil {
+		return "", err
+	}
+
+	q := currentURL.Query()
+	q.Set("marker", mark)
+	currentURL.RawQuery = q.Encode()
+
+	return currentURL.String(), nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface.
+func (current MarkerPageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the marker page's body. This method is needed to satisfy the
+// Page interface.
+func (current MarkerPageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
new file mode 100644
index 00000000000..6f1609ef2e3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go
@@ -0,0 +1,238 @@
+package pagination
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+var (
+	// ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist.
+	ErrPageNotAvailable = errors.New("The requested page does not exist.")
+)
+
+// Page must be satisfied by the result type of any resource collection.
+// It allows clients to interact with the resource uniformly, regardless of whether or how it's paginated.
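To make the marker strategy concrete: a collection type embeds MarkerPageBase and supplies LastMarker, which NextPageURL (above) feeds back as the ?marker= query parameter. A sketch with an invented JSON shape (a top-level array of objects with a "name" key):

```go
// NamePage is a hypothetical marker-paginated page type.
type NamePage struct {
	pagination.MarkerPageBase
}

// LastMarker returns the "name" of the final item on this page.
func (p NamePage) LastMarker() (string, error) {
	items, ok := p.Body.([]interface{})
	if !ok {
		return "", fmt.Errorf("unexpected body type %T", p.Body)
	}
	if len(items) == 0 {
		return "", nil
	}
	last, ok := items[len(items)-1].(map[string]interface{})
	if !ok {
		return "", fmt.Errorf("unexpected item type %T", items[len(items)-1])
	}
	name, _ := last["name"].(string)
	return name, nil
}
```

The createPage function handed to a Pager would set Owner to the page it returns, so that MarkerPageBase.NextPageURL can reach this LastMarker through the interface.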
+// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs instead.
+// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type
+// will need to implement.
+type Page interface {
+
+	// NextPageURL generates the URL for the page of data that follows this collection.
+	// Return "" if no such page exists.
+	NextPageURL() (string, error)
+
+	// IsEmpty returns true if this Page has no items in it.
+	IsEmpty() (bool, error)
+
+	// GetBody returns the Page Body. This is used in the `AllPages` method.
+	GetBody() interface{}
+}
+
+// Pager knows how to advance through a specific resource collection, one page at a time.
+type Pager struct {
+	client *gophercloud.ServiceClient
+
+	initialURL string
+
+	createPage func(r PageResult) Page
+
+	Err error
+
+	// Headers supplies additional HTTP headers to populate on each paged request.
+	Headers map[string]string
+}
+
+// NewPager constructs a manually-configured pager.
+// Supply the URL for the first page and a function that converts a PageResult into the specific Page type for this collection.
+func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     client,
+		initialURL: initialURL,
+		createPage: createPage,
+	}
+}
+
+// WithPageCreator returns a new Pager that substitutes a different page creation function. This is
+// useful for overriding List functions in delegation.
+func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     p.client,
+		initialURL: p.initialURL,
+		createPage: createPage,
+	}
+}
+
+func (p Pager) fetchNextPage(url string) (Page, error) {
+	resp, err := Request(p.client, p.Headers, url)
+	if err != nil {
+		return nil, err
+	}
+
+	remembered, err := PageResultFrom(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.createPage(remembered), nil
+}
+
+// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function.
+// Return "false" from the handler to prematurely stop iterating.
+func (p Pager) EachPage(handler func(Page) (bool, error)) error {
+	if p.Err != nil {
+		return p.Err
+	}
+	currentURL := p.initialURL
+	for {
+		currentPage, err := p.fetchNextPage(currentURL)
+		if err != nil {
+			return err
+		}
+
+		empty, err := currentPage.IsEmpty()
+		if err != nil {
+			return err
+		}
+		if empty {
+			return nil
+		}
+
+		ok, err := handler(currentPage)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return nil
+		}
+
+		currentURL, err = currentPage.NextPageURL()
+		if err != nil {
+			return err
+		}
+		if currentURL == "" {
+			return nil
+		}
+	}
+}
+
+// AllPages returns all the pages from a `List` operation in a single page,
+// allowing the user to retrieve all the pages at once.
+func (p Pager) AllPages() (Page, error) {
+	// pagesSlice holds all the pages until they get converted into a Page Body.
+	var pagesSlice []interface{}
+	// body will contain the final concatenated Page body.
+	var body reflect.Value
+
+	// Grab a test page to ascertain the page body type.
+	testPage, err := p.fetchNextPage(p.initialURL)
+	if err != nil {
+		return nil, err
+	}
+	// Store the page type so we can use reflection to create a new mega-page of
+	// that type.
+	pageType := reflect.TypeOf(testPage)
+
+	// if it's a single page, just return the testPage (first page)
+	if _, found := pageType.FieldByName("SinglePageBase"); found {
+		return testPage, nil
+	}
+
+	// Switch on the page body type. Recognized types are `map[string]interface{}`,
+	// `[]byte`, and `[]interface{}`.
+	switch pb := testPage.GetBody().(type) {
+	case map[string]interface{}:
+		// key is the map key for the page body if the body type is `map[string]interface{}`.
+		var key string
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().(map[string]interface{})
+			for k, v := range b {
+				// If it's a linked page, we don't want the `links`, we want the other one.
+				if !strings.HasSuffix(k, "links") {
+					// check the field's type. we only want []interface{} (which is really []map[string]interface{})
+					switch vt := v.(type) {
+					case []interface{}:
+						key = k
+						pagesSlice = append(pagesSlice, vt...)
+					}
+				}
+			}
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to value of type `map[string]interface{}`
+		body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice)))
+		body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice))
+	case []byte:
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]byte)
+			pagesSlice = append(pagesSlice, b)
+			// separate pages with a newline (byte 10)
+			pagesSlice = append(pagesSlice, []byte{10})
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		if len(pagesSlice) > 0 {
+			// Remove the trailing newline.
+			pagesSlice = pagesSlice[:len(pagesSlice)-1]
+		}
+		var b []byte
+		// Combine the slice of slices into a single slice.
+		for _, slice := range pagesSlice {
+			b = append(b, slice.([]byte)...)
+		}
+		// Set body to a value of type `[]byte`.
+		body = reflect.New(reflect.TypeOf(b)).Elem()
+		body.SetBytes(b)
+	case []interface{}:
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]interface{})
+			pagesSlice = append(pagesSlice, b...)
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to value of type `[]interface{}`
+		body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice))
+		for i, s := range pagesSlice {
+			body.Index(i).Set(reflect.ValueOf(s))
+		}
+	default:
+		err := gophercloud.ErrUnexpectedType{}
+		err.Expected = "map[string]interface{}/[]byte/[]interface{}"
+		err.Actual = fmt.Sprintf("%T", pb)
+		return nil, err
+	}
+
+	// Each `Extract*` function is expecting a specific type of page coming back,
+	// otherwise the type assertion in those functions will fail. pageType is needed
+	// to create a type in this method that has the same type that the `Extract*`
+	// function is expecting and set the Body of that object to the concatenated
+	// pages.
+	page := reflect.New(pageType)
+	// Set the page body to be the concatenated pages.
+	page.Elem().FieldByName("Body").Set(body)
+	// Set any additional headers that were passed along. The `objectstorage` package,
+	// for example, passes a Content-Type header.
+	h := make(http.Header)
+	for k, v := range p.Headers {
+		h.Add(k, v)
+	}
+	page.Elem().FieldByName("Header").Set(reflect.ValueOf(h))
+	// Type assert the page to a Page interface so that the type assertion in the
+	// `Extract*` methods will work.
+	return page.Elem().Interface().(Page), err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
new file mode 100644
index 00000000000..912daea3642
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
@@ -0,0 +1,4 @@
+/*
+Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs.
+*/
+package pagination
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
new file mode 100644
index 00000000000..4251d6491ef
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
@@ -0,0 +1,33 @@
+package pagination
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once.
+type SinglePageBase PageResult
+
+// NextPageURL always returns "" to indicate that there are no more pages to return.
+func (current SinglePageBase) NextPageURL() (string, error) {
+	return "", nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface.
+func (current SinglePageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the single page's body. This method is needed to satisfy the
+// Page interface.
+func (current SinglePageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go
new file mode 100644
index 00000000000..6afc8f8b721
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/params.go
@@ -0,0 +1,463 @@
+package gophercloud
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+/*
+BuildRequestBody builds a map[string]interface from the given `struct`. If
+parent is not an empty string, the final map[string]interface returned will
+encapsulate the built one. For example:
+
+	disk := 1
+	createOpts := flavors.CreateOpts{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       &disk,
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1.0,
+	}
+
+	body, err := gophercloud.BuildRequestBody(createOpts, "flavor")
+
+The above example can be run as-is, however it is recommended to look at how
+BuildRequestBody is used within Gophercloud to more fully understand how it
+fits within the request process as a whole rather than use it directly as shown
+above.
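The function body after this comment also enforces `required`, `xor`, and `or` struct tags before marshaling. A hypothetical options struct exercising two of them (the field names here are invented for this sketch):

```go
// BuildRequestBody would reject this struct with ErrMissingInput unless Name
// is non-empty and exactly one of ImageRef/SnapshotID is set, per the tags.
type CreateOpts struct {
	Name       string `json:"name" required:"true"`
	ImageRef   string `json:"imageRef,omitempty" xor:"SnapshotID"`
	SnapshotID string `json:"snapshotId,omitempty" xor:"ImageRef"`
}
```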
+*/ +func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]interface{}) + if optsValue.Kind() == reflect.Struct { + //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + + if f.Name != strings.Title(f.Name) { + //fmt.Printf("Skipping field: %s...\n", f.Name) + continue + } + + //fmt.Printf("Starting on field: %s...\n", f.Name) + + zero := isZero(v) + //fmt.Printf("v is zero?: %v\n", zero) + + // if the field has a required tag that's set to "true" + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) + // if the field's value is zero, return a missing-argument error + if zero { + // if the field has a 'required' tag, it can't have a zero-value + err := ErrMissingInput{} + err.Argument = f.Name + return nil, err + } + } + + if xorTag := f.Tag.Get("xor"); xorTag != "" { + //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) + xorField := optsValue.FieldByName(xorTag) + var xorFieldIsZero bool + if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { + xorFieldIsZero = true + } else { + if xorField.Kind() == reflect.Ptr { + xorField = xorField.Elem() + } + xorFieldIsZero = isZero(xorField) + } + if !(zero != xorFieldIsZero) { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) + err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) + return nil, err + } + } + + if orTag := f.Tag.Get("or"); orTag != "" { + //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) + //fmt.Printf("field is zero?: %v\n", zero) + if zero { + orField := optsValue.FieldByName(orTag) + var orFieldIsZero bool + if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { + orFieldIsZero = true + } else { + if orField.Kind() == reflect.Ptr { + orField = orField.Elem() + } + orFieldIsZero = isZero(orField) + } + if orFieldIsZero { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) + err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) + return nil, err + } + } + } + + if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { + if zero { + //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) + if jsonTag := f.Tag.Get("json"); jsonTag != "" { + jsonTagPieces := strings.Split(jsonTag, ",") + if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { + if v.CanSet() { + if !v.IsNil() { + if v.Kind() == reflect.Ptr { + v.Set(reflect.Zero(v.Type())) + } + } + //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) + } + } + } + continue + } + + //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) + _, err := BuildRequestBody(v.Interface(), f.Name) + if err != nil { + return nil, err + } + } + } + + //fmt.Printf("opts: %+v \n", opts) + + b, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + //fmt.Printf("string(b): %s\n", string(b)) + + err = json.Unmarshal(b, 
&optsMap) + if err != nil { + return nil, err + } + + //fmt.Printf("optsMap: %+v\n", optsMap) + + if parent != "" { + optsMap = map[string]interface{}{parent: optsMap} + } + //fmt.Printf("optsMap after parent added: %+v\n", optsMap) + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +// EnabledState is a convenience type, mostly used in Create and Update +// operations. Because the zero value of a bool is FALSE, we need to use a +// pointer instead to indicate zero-ness. +type EnabledState *bool + +// Convenience vars for EnabledState values. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// IPVersion is a type for the possible IP address versions. Valid instances +// are IPv4 and IPv6 +type IPVersion int + +const ( + // IPv4 is used for IP version 4 addresses + IPv4 IPVersion = 4 + // IPv6 is used for IP version 6 addresses + IPv6 IPVersion = 6 +) + +// IntToPointer is a function for converting integers into integer pointers. +// This is useful when passing in options to operations. +func IntToPointer(i int) *int { + return &i +} + +/* +MaybeString is an internal function to be used by request methods in individual +resource packages. + +It takes a string that might be a zero value and returns either a pointer to its +address or nil. This is useful for allowing users to conveniently omit values +from an options struct by leaving them zeroed, but still pass nil to the JSON +serializer so they'll be omitted from the request body. +*/ +func MaybeString(original string) *string { + if original != "" { + return &original + } + return nil +} + +/* +MaybeInt is an internal function to be used by request methods in individual +resource packages. + +Like MaybeString, it accepts an int that may or may not be a zero value, and +returns either a pointer to its address or nil. It's intended to hint that the +JSON serializer should omit its field. +*/ +func MaybeInt(original int) *int { + if original != 0 { + return &original + } + return nil +} + +/* +func isUnderlyingStructZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr: + return isUnderlyingStructZero(v.Elem()) + default: + return isZero(v) + } +} +*/ + +var t time.Time + +func isZero(v reflect.Value) bool { + //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + return true + } + return false + case reflect.Func, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + z := true + for i := 0; i < v.Len(); i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + if v.Type() == reflect.TypeOf(t) { + if v.Interface().(time.Time).IsZero() { + return true + } + return false + } + z := true + for i := 0; i < v.NumField(); i++ { + z = z && isZero(v.Field(i)) + } + return z + } + // Compare other types directly: + z := reflect.Zero(v.Type()) + //fmt.Printf("zero type for value: %+v\n\n\n", z) + return v.Interface() == z.Interface() +} + +/* +BuildQueryString is an internal function to be used by request methods in +individual resource packages. + +It accepts a tagged structure and expands it into a URL struct. Field names are +converted into query parameters based on a "q" tag. 
For example:
+
+	type Something struct {
+		Bar string `q:"x_bar"`
+		Baz int    `q:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: 99,
+	}
+
+will be converted into "?x_bar=AAA&lorem_ipsum=99".
+
+The struct's fields may be strings, integers, or boolean values. Fields left at
+their type's zero value will be omitted from the query.
+*/
+func BuildQueryString(opts interface{}) (*url.URL, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	params := url.Values{}
+
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			qTag := f.Tag.Get("q")
+
+			// if the field has a 'q' tag, it goes in the query string
+			if qTag != "" {
+				tags := strings.Split(qTag, ",")
+
+				// if the field is set, add it to the slice of query pieces
+				if !isZero(v) {
+				loop:
+					switch v.Kind() {
+					case reflect.Ptr:
+						v = v.Elem()
+						goto loop
+					case reflect.String:
+						params.Add(tags[0], v.String())
+					case reflect.Int:
+						params.Add(tags[0], strconv.FormatInt(v.Int(), 10))
+					case reflect.Bool:
+						params.Add(tags[0], strconv.FormatBool(v.Bool()))
+					case reflect.Slice:
+						switch v.Type().Elem() {
+						case reflect.TypeOf(0):
+							for i := 0; i < v.Len(); i++ {
+								params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10))
+							}
+						default:
+							for i := 0; i < v.Len(); i++ {
+								params.Add(tags[0], v.Index(i).String())
+							}
+						}
+					}
+				} else {
+					// Otherwise, the field is not set.
+					if len(tags) == 2 && tags[1] == "required" {
+						// And the field is required. Return an error.
+						return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name)
+					}
+				}
+			}
+		}
+
+		return &url.URL{RawQuery: params.Encode()}, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return nil, fmt.Errorf("Options type is not a struct.")
+}
+
+/*
+BuildHeaders is an internal function to be used by request methods in
+individual resource packages.
+
+It accepts an arbitrary tagged structure and produces a string map that's
+suitable for use as the HTTP headers of an outgoing request. Field names are
+mapped to header names based on "h" tags.
+
+	type Something struct {
+		Bar string `h:"x_bar"`
+		Baz int    `h:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: 99,
+	}
+
+will be converted into:
+
+	map[string]string{
+		"x_bar":       "AAA",
+		"lorem_ipsum": "99",
+	}
+
+Untagged fields and fields left at their zero values are skipped. Integers,
+booleans and string values are supported.
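One detail worth calling out before the implementation: a tag may also carry a ",required" suffix, which the body below turns into an error when the field is left at its zero value. A hedged sketch (the header name and options type are invented):

```go
// DeleteOpts is hypothetical; BuildHeaders returns an error if IfMatch is
// left empty, because of the ",required" tag suffix.
type DeleteOpts struct {
	IfMatch string `h:"If-Match,required"`
}

headers, err := gophercloud.BuildHeaders(DeleteOpts{IfMatch: "abc123"})
// headers == map[string]string{"If-Match": "abc123"}, err == nil
```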
+*/
+func BuildHeaders(opts interface{}) (map[string]string, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	optsMap := make(map[string]string)
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			hTag := f.Tag.Get("h")
+
+			// if the field has a 'h' tag, it goes in the header
+			if hTag != "" {
+				tags := strings.Split(hTag, ",")
+
+				// if the field is set, add it to the slice of query pieces
+				if !isZero(v) {
+					switch v.Kind() {
+					case reflect.String:
+						optsMap[tags[0]] = v.String()
+					case reflect.Int:
+						optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10)
+					case reflect.Bool:
+						optsMap[tags[0]] = strconv.FormatBool(v.Bool())
+					}
+				} else {
+					// Otherwise, the field is not set.
+					if len(tags) == 2 && tags[1] == "required" {
+						// And the field is required. Return an error.
+						return optsMap, fmt.Errorf("Required header not set.")
+					}
+				}
+			}
+
+		}
+		return optsMap, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return optsMap, fmt.Errorf("Options type is not a struct.")
+}
+
+// IDSliceToQueryString takes a slice of elements and converts them into a query
+// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the
+// result would be `?foo=20&foo=40&foo=60`.
+func IDSliceToQueryString(name string, ids []int) string {
+	str := ""
+	for k, v := range ids {
+		if k == 0 {
+			str += "?"
+		} else {
+			str += "&"
+		}
+		str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v))
+	}
+	return str
+}
+
+// IntWithinRange returns TRUE if an integer falls within a defined range, and
+// FALSE if not.
+func IntWithinRange(val, min, max int) bool {
+	return val > min && val < max
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go
new file mode 100644
index 00000000000..f88682381dd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go
@@ -0,0 +1,307 @@
+package gophercloud
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// DefaultUserAgent is the default User-Agent string set in the request header.
+const DefaultUserAgent = "gophercloud/2.0.0"
+
+// UserAgent represents a User-Agent header.
+type UserAgent struct {
+	// prepend is the slice of User-Agent strings to prepend to DefaultUserAgent.
+	// All the strings to prepend are accumulated and prepended in the Join method.
+	prepend []string
+}
+
+// Prepend prepends a user-defined string to the default User-Agent string. Users
+// may pass in one or more strings to prepend.
+func (ua *UserAgent) Prepend(s ...string) {
+	ua.prepend = append(s, ua.prepend...)
+}
+
+// Join concatenates all the user-defined User-Agent strings with the default
+// Gophercloud User-Agent string.
+func (ua *UserAgent) Join() string {
+	uaSlice := append(ua.prepend, DefaultUserAgent)
+	return strings.Join(uaSlice, " ")
+}
+
+// ProviderClient stores details that are required to interact with any
+// services within a specific provider's API.
+//
+// Generally, you acquire a ProviderClient by calling the NewClient method in
+// the appropriate provider's child package, providing whatever authentication
+// credentials are required.
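For instance, a tool built on gophercloud might construct a bare ProviderClient and stamp its own User-Agent on top of the default before issuing requests (the URLs and agent string below are placeholders):

```go
pc := &gophercloud.ProviderClient{
	IdentityBase:     "https://identity.example.com/",
	IdentityEndpoint: "https://identity.example.com/v3/",
}
pc.UserAgent.Prepend("my-tool/1.0")
// pc.UserAgent.Join() == "my-tool/1.0 gophercloud/2.0.0"
```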
+type ProviderClient struct {
+	// IdentityBase is the base URL used for a particular provider's identity
+	// service - it will be used when issuing authentication requests. It
+	// should point to the root resource of the identity service, not a specific
+	// identity version.
+	IdentityBase string
+
+	// IdentityEndpoint is the identity endpoint. This may be a specific version
+	// of the identity service. If this is the case, this endpoint is used rather
+	// than querying versions first.
+	IdentityEndpoint string
+
+	// TokenID is the ID of the most recently issued valid token.
+	TokenID string
+
+	// EndpointLocator describes how this provider discovers the endpoints for
+	// its constituent services.
+	EndpointLocator EndpointLocator
+
+	// HTTPClient allows users to interject arbitrary http, https, or other transit behaviors.
+	HTTPClient http.Client
+
+	// UserAgent represents the User-Agent header in the HTTP request.
+	UserAgent UserAgent
+
+	// ReauthFunc is the function used to re-authenticate the user if the request
+	// fails with a 401 HTTP response code. This is needed because there may be multiple
+	// authentication functions for different Identity service versions.
+	ReauthFunc func() error
+
+	Debug bool
+}
+
+// AuthenticatedHeaders returns a map of HTTP headers that are common for all
+// authenticated service requests.
+func (client *ProviderClient) AuthenticatedHeaders() map[string]string {
+	if client.TokenID == "" {
+		return map[string]string{}
+	}
+	return map[string]string{"X-Auth-Token": client.TokenID}
+}
+
+// RequestOpts customizes the behavior of the provider.Request() method.
+type RequestOpts struct {
+	// JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The
+	// content type of the request will default to "application/json" unless overridden by MoreHeaders.
+	// It's an error to specify both a JSONBody and a RawBody.
+	JSONBody interface{}
+	// RawBody contains an io.Reader that will be consumed by the request directly. No content-type
+	// will be set unless one is provided explicitly by MoreHeaders.
+	RawBody io.Reader
+	// JSONResponse, if provided, will be populated with the contents of the response body parsed as
+	// JSON.
+	JSONResponse interface{}
+	// OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If
+	// the response has a different code, an error will be returned.
+	OkCodes []int
+	// MoreHeaders specifies additional HTTP headers to be provided on the request. If a header is
+	// provided with a blank value (""), that header will be *omitted* instead: use this to suppress
+	// the default Accept header or an inferred Content-Type, for example.
+	MoreHeaders map[string]string
+	// ErrorContext specifies the resource error type to return if an error is encountered.
+	// This lets resources override default error messages based on the response status code.
+	ErrorContext error
+}
+
+var applicationJSON = "application/json"
+
+// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication
+// header will automatically be provided.
+func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) {
+	var body io.Reader
+	var contentType *string
+
+	// Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided
+	// io.Reader as-is. Default the content-type to application/json.
+ if options.JSONBody != nil { + if options.RawBody != nil { + panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().") + } + + rendered, err := json.Marshal(options.JSONBody) + if err != nil { + return nil, err + } + + body = bytes.NewReader(rendered) + contentType = &applicationJSON + } + + if options.RawBody != nil { + body = options.RawBody + } + + // Construct the http.Request. + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + + // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to + // modify or omit any header. + if contentType != nil { + req.Header.Set("Content-Type", *contentType) + } + req.Header.Set("Accept", applicationJSON) + + for k, v := range client.AuthenticatedHeaders() { + req.Header.Add(k, v) + } + + // Set the User-Agent header + req.Header.Set("User-Agent", client.UserAgent.Join()) + + if options.MoreHeaders != nil { + for k, v := range options.MoreHeaders { + if v != "" { + req.Header.Set(k, v) + } else { + req.Header.Del(k) + } + } + } + + // Set connection parameter to close the connection immediately when we've got the response + req.Close = true + + // Issue the request. + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + + // Allow default OkCodes if none explicitly set + if options.OkCodes == nil { + options.OkCodes = defaultOkCodes(method) + } + + // Validate the HTTP response status. + var ok bool + for _, code := range options.OkCodes { + if resp.StatusCode == code { + ok = true + break + } + } + + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + //pc := make([]uintptr, 1) + //runtime.Callers(2, pc) + //f := runtime.FuncForPC(pc[0]) + respErr := ErrUnexpectedResponseCode{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, + } + //respErr.Function = "gophercloud.ProviderClient.Request" + + errType := options.ErrorContext + switch resp.StatusCode { + case http.StatusBadRequest: + err = ErrDefault400{respErr} + if error400er, ok := errType.(Err400er); ok { + err = error400er.Error400(respErr) + } + case http.StatusUnauthorized: + if client.ReauthFunc != nil { + err = client.ReauthFunc() + if err != nil { + e := &ErrUnableToReauthenticate{} + e.ErrOriginal = respErr + return nil, e + } + if options.RawBody != nil { + if seeker, ok := options.RawBody.(io.Seeker); ok { + seeker.Seek(0, 0) + } + } + resp, err = client.Request(method, url, options) + if err != nil { + switch err.(type) { + case *ErrUnexpectedResponseCode: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err.(*ErrUnexpectedResponseCode) + return nil, e + default: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err + return nil, e + } + } + return resp, nil + } + err = ErrDefault401{respErr} + if error401er, ok := errType.(Err401er); ok { + err = error401er.Error401(respErr) + } + case http.StatusNotFound: + err = ErrDefault404{respErr} + if error404er, ok := errType.(Err404er); ok { + err = error404er.Error404(respErr) + } + case http.StatusMethodNotAllowed: + err = ErrDefault405{respErr} + if error405er, ok := errType.(Err405er); ok { + err = error405er.Error405(respErr) + } + case http.StatusRequestTimeout: + err = ErrDefault408{respErr} + if error408er, ok := errType.(Err408er); ok { + err = error408er.Error408(respErr) + } + case 429: + err = ErrDefault429{respErr} + if error429er, ok := errType.(Err429er); ok { + err = error429er.Error429(respErr) + } + case 
http.StatusInternalServerError: + err = ErrDefault500{respErr} + if error500er, ok := errType.(Err500er); ok { + err = error500er.Error500(respErr) + } + case http.StatusServiceUnavailable: + err = ErrDefault503{respErr} + if error503er, ok := errType.(Err503er); ok { + err = error503er.Error503(respErr) + } + } + + if err == nil { + err = respErr + } + + return resp, err + } + + // Parse the response body as JSON, if requested to do so. + if options.JSONResponse != nil { + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { + return nil, err + } + } + + return resp, nil +} + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "PATCH": + return []int{200, 204} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go new file mode 100644 index 00000000000..e64feee19ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -0,0 +1,382 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +/* +Result is an internal type to be used by individual resource packages, but its +methods will be available on a wide variety of user-facing embedding types. + +It acts as a base struct that other Result types, returned from request +functions, can embed for convenience. All Results capture basic information +from the HTTP transaction that was performed, including the response body, +HTTP headers, and any errors that happened. + +Generally, each Result type will have an Extract method that can be used to +further interpret the result's payload in a specific context. Extensions or +providers can then provide additional extraction functions to pull out +provider- or extension-specific information as well. +*/ +type Result struct { + // Body is the payload of the HTTP response from the server. In most cases, + // this will be the deserialized JSON structure. + Body interface{} + + // Header contains the HTTP header structure from the original response. + Header http.Header + + // Err is an error that occurred during the operation. It's deferred until + // extraction to make it easier to chain the Extract call. + Err error +} + +// ExtractInto allows users to provide an object into which `Extract` will extract +// the `Result.Body`. This would be useful for OpenStack providers that have +// different fields in the response object than OpenStack proper. 
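Resource packages typically layer a typed Extract on top of the ExtractInto method defined below, in the same shape as the swauth results earlier in this diff. A sketch with invented names (a Server resource under a "server" envelope key):

```go
// Server and GetResult are hypothetical; the "server" key is an assumption.
type Server struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

type GetResult struct {
	gophercloud.Result
}

// Extract interprets the result body as a Server.
func (r GetResult) Extract() (*Server, error) {
	var s struct {
		Server *Server `json:"server"`
	}
	err := r.ExtractInto(&s)
	return s.Server, err
}
```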
+func (r Result) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + if reader, ok := r.Body.(io.Reader); ok { + if readCloser, ok := reader.(io.Closer); ok { + defer readCloser.Close() + } + return json.NewDecoder(reader).Decode(to) + } + + b, err := json.Marshal(r.Body) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +func (r Result) extractIntoPtr(to interface{}, label string) error { + if label == "" { + return r.ExtractInto(&to) + } + + var m map[string]interface{} + err := r.ExtractInto(&m) + if err != nil { + return err + } + + b, err := json.Marshal(m[label]) + if err != nil { + return err + } + + toValue := reflect.ValueOf(to) + if toValue.Kind() == reflect.Ptr { + toValue = toValue.Elem() + } + + switch toValue.Kind() { + case reflect.Slice: + typeOfV := toValue.Type().Elem() + if typeOfV.Kind() == reflect.Struct { + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) + newType := reflect.New(typeOfV).Elem() + + for _, v := range m[label].([]interface{}) { + b, err := json.Marshal(v) + if err != nil { + return err + } + + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + newSlice = reflect.Append(newSlice, newType) + } + toValue.Set(newSlice) + } + } + case reflect.Struct: + typeOfV := toValue.Type() + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + for i := 0; i < toValue.NumField(); i++ { + toField := toValue.Field(i) + if toField.Kind() == reflect.Struct { + s := toField.Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + } + } + } + + err = json.Unmarshal(b, &to) + return err +} + +// ExtractIntoStructPtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying struct type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Struct: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to struct, got: %v", t) + } +} + +// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying slice type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Slice: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to slice, got: %v", t) + } +} + +// PrettyPrintJSON creates a string containing the full response body as +// pretty-printed JSON. It's useful for capturing test fixtures and for +// debugging extraction bugs. 
If you include its output in an issue related to
+// a buggy extraction function, we will all love you forever.
+func (r Result) PrettyPrintJSON() string {
+	pretty, err := json.MarshalIndent(r.Body, "", "  ")
+	if err != nil {
+		panic(err.Error())
+	}
+	return string(pretty)
+}
+
+// ErrResult is an internal type to be used by individual resource packages, but
+// its methods will be available on a wide variety of user-facing embedding
+// types.
+//
+// It represents results that only contain a potential error and
+// nothing else. Usually, if the operation executed successfully, the Err field
+// will be nil; otherwise it will be stocked with a relevant error. Use the
+// ExtractErr method to cleanly pull it out.
+type ErrResult struct {
+	Result
+}
+
+// ExtractErr is a function that extracts error information, or nil, from a result.
+func (r ErrResult) ExtractErr() error {
+	return r.Err
+}
+
+/*
+HeaderResult is an internal type to be used by individual resource packages, but
+its methods will be available on a wide variety of user-facing embedding types.
+
+It represents a result that only contains an error (possibly nil) and an
+http.Header. This is used, for example, by the objectstorage packages in
+openstack, because most of the operations don't return response bodies, but do
+have relevant information in headers.
+*/
+type HeaderResult struct {
+	Result
+}
+
+// ExtractInto allows users to provide an object into which `Extract` will
+// extract the http.Header headers of the result.
+func (r HeaderResult) ExtractInto(to interface{}) error {
+	if r.Err != nil {
+		return r.Err
+	}
+
+	tmpHeaderMap := map[string]string{}
+	for k, v := range r.Header {
+		if len(v) > 0 {
+			tmpHeaderMap[k] = v[0]
+		}
+	}
+
+	b, err := json.Marshal(tmpHeaderMap)
+	if err != nil {
+		return err
+	}
+	err = json.Unmarshal(b, to)
+
+	return err
+}
+
+// RFC3339Milli describes a common time format used by some API responses.
+const RFC3339Milli = "2006-01-02T15:04:05.999999Z"
+
+type JSONRFC3339Milli time.Time
+
+func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error {
+	b := bytes.NewBuffer(data)
+	dec := json.NewDecoder(b)
+	var s string
+	if err := dec.Decode(&s); err != nil {
+		return err
+	}
+	t, err := time.Parse(RFC3339Milli, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC3339Milli(t)
+	return nil
+}
+
+const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999"
+
+type JSONRFC3339MilliNoZ time.Time
+
+func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	t, err := time.Parse(RFC3339MilliNoZ, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC3339MilliNoZ(t)
+	return nil
+}
+
+type JSONRFC1123 time.Time
+
+func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		return err
+	}
+	*jt = JSONRFC1123(t)
+	return nil
+}
+
+type JSONUnix time.Time
+
+func (jt *JSONUnix) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	if s == "" {
+		return nil
+	}
+	unix, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return err
+	}
+	t := time.Unix(unix, 0)
+	*jt = JSONUnix(t)
+	return nil
+}
+
+// RFC3339NoZ is the time format used in Heat (Orchestration).
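Before the Heat format below, a hedged usage sketch for the wrapper types just defined: decode into the wrapper, then convert back to time.Time. The resource struct and field name are invented:

```go
// resource is hypothetical; "created_at" is an illustrative field name.
type resource struct {
	CreatedAt gophercloud.JSONRFC3339Milli `json:"created_at"`
}

var res resource
if err := json.Unmarshal([]byte(`{"created_at":"2017-10-31T12:00:00.000000Z"}`), &res); err != nil {
	panic(err)
}
created := time.Time(res.CreatedAt) // back to a plain time.Time
```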
+const RFC3339NoZ = "2006-01-02T15:04:05" + +type JSONRFC3339NoZ time.Time + +func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339NoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339NoZ(t) + return nil +} + +/* +Link is an internal type to be used in packages of collection resources that are +paginated in a certain way. + +It's a response substructure common to many paginated collection results that is +used to point to related pages. Usually, the one we care about is the one with +Rel field set to "next". +*/ +type Link struct { + Href string `json:"href"` + Rel string `json:"rel"` +} + +/* +ExtractNextURL is an internal function useful for packages of collection +resources that are paginated in a certain way. + +It attempts to extract the "next" URL from slice of Link structs, or +"" if no such URL is present. +*/ +func ExtractNextURL(links []Link) (string, error) { + var url string + + for _, l := range links { + if l.Rel == "next" { + url = l.Href + } + } + + if url == "" { + return "", nil + } + + return url, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go new file mode 100644 index 00000000000..1160fefa7c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -0,0 +1,122 @@ +package gophercloud + +import ( + "io" + "net/http" + "strings" +) + +// ServiceClient stores details required to interact with a specific service API implemented by a provider. +// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. +type ServiceClient struct { + // ProviderClient is a reference to the provider that implements this service. + *ProviderClient + + // Endpoint is the base URL of the service's API, acquired from a service catalog. + // It MUST end with a /. + Endpoint string + + // ResourceBase is the base URL shared by the resources within a service's API. It should include + // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used + // as-is, instead. + ResourceBase string + + // This is the service client type (e.g. compute, sharev2). + // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. + // It is only exported because it gets set in a different package. + Type string + + // The microversion of the service to use. Set this to use a particular microversion. + Microversion string +} + +// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. +func (client *ServiceClient) ResourceBaseURL() string { + if client.ResourceBase != "" { + return client.ResourceBase + } + return client.Endpoint +} + +// ServiceURL constructs a URL for a resource belonging to this provider. 
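For example (the endpoint is a placeholder):

```go
client := &gophercloud.ServiceClient{Endpoint: "https://compute.example.com/v2/"}

// With no ResourceBase set, ResourceBaseURL falls back to Endpoint.
url := client.ServiceURL("servers", "detail")
// url == "https://compute.example.com/v2/servers/detail"
```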
+func (client *ServiceClient) ServiceURL(parts ...string) string { + return client.ResourceBaseURL() + strings.Join(parts, "/") +} + +func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + if opts.MoreHeaders == nil { + opts.MoreHeaders = make(map[string]string) + } + + if client.Microversion != "" { + client.setMicroversionHeader(opts) + } +} + +// Get calls `Request` with the "GET" HTTP verb. +func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, JSONResponse, opts) + return client.Request("GET", url, opts) +} + +// Post calls `Request` with the "POST" HTTP verb. +func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("POST", url, opts) +} + +// Put calls `Request` with the "PUT" HTTP verb. +func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PUT", url, opts) +} + +// Patch calls `Request` with the "PATCH" HTTP verb. +func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PATCH", url, opts) +} + +// Delete calls `Request` with the "DELETE" HTTP verb. +func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("DELETE", url, opts) +} + +func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { + switch client.Type { + case "compute": + opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion + case "sharev2": + opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + } + + if client.Type != "" { + opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go new file mode 100644 index 00000000000..68f9a5d3eca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/util.go @@ -0,0 +1,102 @@ +package gophercloud + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" + "time" +) + +// WaitFor polls a predicate function, once per second, up to a timeout limit. +// This is useful to wait for a resource to transition to a certain state. +// To handle situations when the predicate might hang indefinitely, the +// predicate will be prematurely cancelled after the timeout. +// Resource packages will wrap this in a more convenient function that's +// specific to a certain resource, but it can also be useful on its own. 
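A typical call site, sketched (checkStatus is a hypothetical helper that fetches the current status string):

```go
// Poll for up to 300 seconds until the resource reports ACTIVE.
err := gophercloud.WaitFor(300, func() (bool, error) {
	status, err := checkStatus()
	if err != nil {
		return false, err
	}
	return status == "ACTIVE", nil
})
```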
+func WaitFor(timeout int, predicate func() (bool, error)) error { + type WaitForResult struct { + Success bool + Error error + } + + start := time.Now().Unix() + + for { + // If a timeout is set, and that's been exceeded, shut it down. + if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { + return fmt.Errorf("A timeout occurred") + } + + time.Sleep(1 * time.Second) + + var result WaitForResult + ch := make(chan bool, 1) + go func() { + defer close(ch) + satisfied, err := predicate() + result.Success = satisfied + result.Error = err + }() + + select { + case <-ch: + if result.Error != nil { + return result.Error + } + if result.Success { + return nil + } + // If the predicate has not finished by the timeout, cancel it. + case <-time.After(time.Duration(timeout) * time.Second): + return fmt.Errorf("A timeout occurred") + } + } +} + +// NormalizeURL is an internal function to be used by provider clients. +// +// It ensures that each endpoint URL has a closing `/`, as expected by +// ServiceClient's methods. +func NormalizeURL(url string) string { + if !strings.HasSuffix(url, "/") { + return url + "/" + } + return url +} + +// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as +// a reference in the filesystem, if necessary. basePath is assumed to contain +// either '.' when first used, or the file:// type fqdn of the parent resource. +// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml +func NormalizePathURL(basePath, rawPath string) (string, error) { + u, err := url.Parse(rawPath) + if err != nil { + return "", err + } + // if a scheme is defined, it must be a fqdn already + if u.Scheme != "" { + return u.String(), nil + } + // if basePath is a url, then child resources are assumed to be relative to it + bu, err := url.Parse(basePath) + if err != nil { + return "", err + } + var basePathSys, absPathSys string + if bu.Scheme != "" { + basePathSys = filepath.FromSlash(bu.Path) + absPathSys = filepath.Join(basePathSys, rawPath) + bu.Path = filepath.ToSlash(absPathSys) + return bu.String(), nil + } + + absPathSys = filepath.Join(basePath, rawPath) + u.Path = filepath.ToSlash(absPathSys) + if err != nil { + return "", err + } + u.Scheme = "file" + return u.String(), nil + +} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. 
“Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. 
This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 00000000000..7e64988f42d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,43 @@
+Consul API client
+=================
+
+The `api` package provides programmatic access to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.6.0 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api).
+
+Usage
+=====
+
+Below is a small, self-contained example of using the Consul client:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func main() {
+	// Get a new client
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		panic(err)
+	}
+
+	// Get a handle to the KV API
+	kv := client.KV()
+
+	// PUT a new KV pair
+	p := &api.KVPair{Key: "foo", Value: []byte("test")}
+	_, err = kv.Put(p, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Lookup the pair
+	pair, _, err := kv.Get("foo", nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("KV: %v", pair)
+}
+```
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 00000000000..6ea0a752e58
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,193 @@
+package api
+
+import (
+	"time"
+)
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACLReplicationStatus is used to represent the status of ACL replication.
+type ACLReplicationStatus struct {
+	Enabled          bool
+	Running          bool
+	SourceDatacenter string
+	ReplicatedIndex  uint64
+	LastSuccess      time.Time
+	LastError        time.Time
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster
+// to get the first management token.
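+//
+// A minimal usage sketch (error handling elided; the client setup mirrors
+// the README example above):
+//
+//	client, _ := api.NewClient(api.DefaultConfig())
+//	token, _, err := client.ACL().Bootstrap()
+//	// token holds the ID of the initial management token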
+func (a *ACL) Bootstrap() (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/bootstrap") + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Create is used to generate a new token with the given parameters +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + 
r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 00000000000..533b2455782 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,621 @@ +package api + +import ( + "bufio" + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string + EnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AllSegments is used to select for all segments in MembersOpts. +const AllSegments = "_all" + +// MembersOpts is used for querying member information. +type MembersOpts struct { + // WAN is whether to show members from the WAN. + WAN bool + + // Segment is the LAN segment to show members for. Setting this to the + // AllSegments value above will show members in all segments. + Segment string +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + EnableTagOverride bool `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to define a node or service level check +type AgentServiceCheck struct { + Args []string `json:"ScriptArgs,omitempty"` + Script string `json:",omitempty"` // Deprecated, use Args. + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. + Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Header map[string][]string `json:",omitempty"` + Method string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. 
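+	// For example, a value of "30m" (a standard Go duration string, per the
+	// note above) would deregister the service after thirty minutes in the
+	// critical state.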
+	DeregisterCriticalServiceAfter string `json:",omitempty"`
+}
+
+// AgentServiceChecks is a collection of AgentServiceCheck.
+type AgentServiceChecks []*AgentServiceCheck
+
+// AgentToken is used when updating ACL tokens for an agent.
+type AgentToken struct {
+	Token string
+}
+
+// MetricsInfo is used to store different types of metric values from the agent.
+type MetricsInfo struct {
+	Timestamp string
+	Gauges    []GaugeValue
+	Points    []PointValue
+	Counters  []SampledValue
+	Samples   []SampledValue
+}
+
+// GaugeValue stores one value that is updated as time goes on, such as
+// the amount of memory allocated.
+type GaugeValue struct {
+	Name   string
+	Value  float32
+	Labels map[string]string
+}
+
+// PointValue holds a series of points for a metric.
+type PointValue struct {
+	Name   string
+	Points []float32
+}
+
+// SampledValue stores info about a metric that is incremented over time,
+// such as the number of requests to an HTTP endpoint.
+type SampledValue struct {
+	Name   string
+	Count  int
+	Sum    float64
+	Min    float64
+	Max    float64
+	Mean   float64
+	Stddev float64
+	Labels map[string]string
+}
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Metrics is used to query the agent we are speaking to for
+// its current internal metric data
+func (a *Agent) Metrics() (*MetricsInfo, error) {
+	r := a.c.newRequest("GET", "/v1/agent/metrics")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out *MetricsInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Reload triggers a configuration reload for the agent we are connected to.
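+//
+// A hedged usage sketch (assuming a client as in the README):
+//
+//	err := client.Agent().Reload()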
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// MembersOpts returns the known gossip members and can be passed +// additional options for WAN/segment filtering. +func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + r.params.Set("segment", opts.Segment) + if opts.WAN { + r.params.Set("wan", "1") + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. 
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) WarnTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) FailTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "fail")
+}
+
+// updateTTL is used to update the TTL of a check. This is the internal
+// method that uses the old API that's present in Consul versions prior to
+// 0.6.4. Since Consul didn't have an analogous "update" API before, it seemed
+// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
+// but keep the old Pass/Warn/Fail methods using the old API under the hood.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 and the server endpoints will
+// be removed in 0.9.
+func (a *Agent) updateTTL(checkID, note, status string) error {
+	switch status {
+	case "pass":
+	case "warn":
+	case "fail":
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+	endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.params.Set("note", note)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// checkUpdate is the payload for a PUT for a check update.
+type checkUpdate struct {
+	// Status is one of the api.Health* states: HealthPassing
+	// ("passing"), HealthWarning ("warning"), or HealthCritical
+	// ("critical").
+	Status string
+
+	// Output is the information to post to the UI for operators as the
+	// output of the process that decided to hit the TTL check. This is
+	// different from the note field that's associated with the check
+	// itself.
+	Output string
+}
+
+// UpdateTTL is used to update the TTL of a check. This uses the newer API
+// that was introduced in Consul 0.6.4 and later. We translate the old status
+// strings for compatibility (though a newer version of Consul will still be
+// required to use this API).
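+//
+// A hedged sketch ("service:web" is a hypothetical check ID):
+//
+//	// "pass" is translated to HealthPassing by the switch below
+//	err := client.Agent().UpdateTTL("service:web", "all good", "pass")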
+func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
+func (a *Agent) DisableNodeMaintenance() error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Monitor returns a channel which will receive streaming logs from the agent.
+// Providing a non-nil stopCh can be used to close the connection and stop the
+// log stream. An empty string will be sent down the given channel when there's
+// nothing left to stream, after which the caller should close the stopCh.
+func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+	r := a.c.newRequest("GET", "/v1/agent/monitor")
+	r.setQueryOptions(q)
+	if loglevel != "" {
+		r.params.Add("loglevel", loglevel)
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+
+	logCh := make(chan string, 64)
+	go func() {
+		defer resp.Body.Close()
+
+		scanner := bufio.NewScanner(resp.Body)
+		for {
+			select {
+			case <-stopCh:
+				close(logCh)
+				return
+			default:
+			}
+			if scanner.Scan() {
+				// An empty string signals to the caller that
+				// the scan is done, so make sure we only emit
+				// that when the scanner says it's done, not if
+				// we happen to ingest an empty line.
+				if text := scanner.Text(); text != "" {
+					logCh <- text
+				} else {
+					logCh <- " "
+				}
+			} else {
+				logCh <- ""
+			}
+		}
+	}()
+
+	return logCh, nil
+}
+
+// UpdateACLToken updates the agent's "acl_token". See updateToken for more
+// details.
+func (c *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return c.updateToken("acl_token", token, q)
+}
+
+// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken
+// for more details.
+func (c *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return c.updateToken("acl_agent_token", token, q)
+}
+
+// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See
+// updateToken for more details.
+func (c *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return c.updateToken("acl_agent_master_token", token, q)
+}
+
+// UpdateACLReplicationToken updates the agent's "acl_replication_token". See
+// updateToken for more details.
+func (c *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return c.updateToken("acl_replication_token", token, q)
+}
+
+// updateToken can be used to update an agent's ACL token after the agent has
+// started. The tokens are not persisted, so will need to be updated again if
+// the agent is restarted.
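+//
+// For example, UpdateACLToken above is implemented as
+// updateToken("acl_token", token, q).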
+func (c *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) + r.setWriteOptions(q) + r.obj = &AgentToken{Token: token} + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 00000000000..97a524b5ee5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,781 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPCAFile defines an environment variable name which sets the + // CA file to use for talking to Consul over TLS. + HTTPCAFile = "CONSUL_CACERT" + + // HTTPCAPath defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul over TLS. + HTTPCAPath = "CONSUL_CAPATH" + + // HTTPClientCert defines an environment variable name which sets the + // client cert file to use for talking to Consul over TLS. + HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. 
+	Token string
+
+	// Near is used to provide a node name that will sort the results
+	// in ascending order based on the estimated round trip time from
+	// that node. Setting this to "_agent" will use the agent's node
+	// for the sort.
+	Near string
+
+	// NodeMeta is used to filter results by nodes with the given
+	// metadata key/value pairs. Currently, only one key/value pair can
+	// be provided for filtering.
+	NodeMeta map[string]string
+
+	// RelayFactor is used in keyring operations to cause responses to be
+	// relayed back to the sender through N other random nodes. Must be
+	// a value from 0 to 5 (inclusive).
+	RelayFactor uint8
+
+	// ctx is an optional context passed through to the underlying HTTP
+	// request layer. Use Context() and WithContext() to manage this.
+	ctx context.Context
+}
+
+func (o *QueryOptions) Context() context.Context {
+	if o != nil && o.ctx != nil {
+		return o.ctx
+	}
+	return context.Background()
+}
+
+func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
+	o2 := new(QueryOptions)
+	if o != nil {
+		*o2 = *o
+	}
+	o2.ctx = ctx
+	return o2
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+	// Providing a datacenter overwrites the DC provided
+	// by the Config
+	Datacenter string
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	// RelayFactor is used in keyring operations to cause responses to be
+	// relayed back to the sender through N other random nodes. Must be
+	// a value from 0 to 5 (inclusive).
+	RelayFactor uint8
+
+	// ctx is an optional context passed through to the underlying HTTP
+	// request layer. Use Context() and WithContext() to manage this.
+	ctx context.Context
+}
+
+func (o *WriteOptions) Context() context.Context {
+	if o != nil && o.ctx != nil {
+		return o.ctx
+	}
+	return context.Background()
+}
+
+func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {
+	o2 := new(WriteOptions)
+	if o != nil {
+		*o2 = *o
+	}
+	o2.ctx = ctx
+	return o2
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+	// LastIndex. This can be used as a WaitIndex to perform
+	// a blocking query
+	LastIndex uint64
+
+	// Time of last contact from the leader for the
+	// server servicing the request
+	LastContact time.Duration
+
+	// Is there a known leader
+	KnownLeader bool
+
+	// How long did the request take
+	RequestTime time.Duration
+
+	// Is address translation enabled for HTTP responses on this agent
+	AddressTranslationEnabled bool
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+	// Username to use for HTTP Basic Authentication
+	Username string
+
+	// Password to use for HTTP Basic Authentication
+	Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+	// Address is the address of the Consul server
+	Address string
+
+	// Scheme is the URI scheme for the Consul server
+	Scheme string
+
+	// Datacenter to use. If not provided, the default agent datacenter is used.
+	Datacenter string
+
+	// Transport is the Transport to use for the http client.
+	Transport *http.Transport
+
+	// HttpClient is the client to use. Default will be
+	// used if not provided.
+	HttpClient *http.Client
+
+	// HttpAuth is the auth info to use for http access.
+	HttpAuth *HttpBasicAuth
+
+	// WaitTime limits how long a Watch will block. If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	TLSConfig TLSConfig
+}
+
+// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
+// Consul using TLS.
+type TLSConfig struct {
+	// Address is the optional address of the Consul server. The port, if any,
+	// will be removed from here and this will be set to the ServerName of the
+	// resulting config.
+	Address string
+
+	// CAFile is the optional path to the CA certificate used for Consul
+	// communication, defaults to the system bundle if not specified.
+	CAFile string
+
+	// CAPath is the optional path to a directory of CA certificates to use for
+	// Consul communication, defaults to the system bundle if not specified.
+	CAPath string
+
+	// CertFile is the optional path to the certificate for Consul
+	// communication. If this is set then you need to also set KeyFile.
+	CertFile string
+
+	// KeyFile is the optional path to the private key for Consul communication.
+	// If this is set then you need to also set CertFile.
+	KeyFile string
+
+	// InsecureSkipVerify if set to true will disable TLS host verification.
+	InsecureSkipVerify bool
+}
+
+// DefaultConfig returns a default configuration for the client. By default this
+// will pool and reuse idle connections to Consul. If you have a long-lived
+// client object, this is the desired behavior and should make the most efficient
+// use of the connections to Consul. If you don't reuse a client object, which
+// is not recommended, then you may notice idle connections building up over
+// time. To avoid this, use DefaultNonPooledConfig() instead.
+func DefaultConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultNonPooledConfig returns a default configuration for the client which
+// does not pool connections. This isn't a recommended configuration because it
+// will reconnect to Consul on every request, but this is useful to avoid the
+// accumulation of idle connections if you make many client objects during the
+// lifetime of your application.
+func DefaultNonPooledConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultTransport)
+}
+
+// defaultConfig returns the default configuration for the client, using the
+// given function to make the transport.
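+//
+// For example (values hypothetical), running with CONSUL_HTTP_ADDR=10.0.1.5:8500
+// and CONSUL_HTTP_SSL=true yields a config that talks to 10.0.1.5:8500 over
+// https, as read by the environment checks below.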
+func defaultConfig(transportFn func() *http.Transport) *Config {
+	config := &Config{
+		Address:   "127.0.0.1:8500",
+		Scheme:    "http",
+		Transport: transportFn(),
+	}
+
+	if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
+		config.Address = addr
+	}
+
+	if token := os.Getenv(HTTPTokenEnvName); token != "" {
+		config.Token = token
+	}
+
+	if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
+		var username, password string
+		if strings.Contains(auth, ":") {
+			split := strings.SplitN(auth, ":", 2)
+			username = split[0]
+			password = split[1]
+		} else {
+			username = auth
+		}
+
+		config.HttpAuth = &HttpBasicAuth{
+			Username: username,
+			Password: password,
+		}
+	}
+
+	if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
+		enabled, err := strconv.ParseBool(ssl)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
+		}
+
+		if enabled {
+			config.Scheme = "https"
+		}
+	}
+
+	if v := os.Getenv(HTTPTLSServerName); v != "" {
+		config.TLSConfig.Address = v
+	}
+	if v := os.Getenv(HTTPCAFile); v != "" {
+		config.TLSConfig.CAFile = v
+	}
+	if v := os.Getenv(HTTPCAPath); v != "" {
+		config.TLSConfig.CAPath = v
+	}
+	if v := os.Getenv(HTTPClientCert); v != "" {
+		config.TLSConfig.CertFile = v
+	}
+	if v := os.Getenv(HTTPClientKey); v != "" {
+		config.TLSConfig.KeyFile = v
+	}
+	if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
+		doVerify, err := strconv.ParseBool(v)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
+		}
+		if !doVerify {
+			config.TLSConfig.InsecureSkipVerify = true
+		}
+	}
+
+	return config
+}
+
+// SetupTLSConfig is used to generate a TLSClientConfig that's useful for
+// talking to Consul using TLS.
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
+	tlsClientConfig := &tls.Config{
+		InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
+	}
+
+	if tlsConfig.Address != "" {
+		server := tlsConfig.Address
+		hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
+		if hasPort {
+			var err error
+			server, _, err = net.SplitHostPort(server)
+			if err != nil {
+				return nil, err
+			}
+		}
+		tlsClientConfig.ServerName = server
+	}
+
+	if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
+		tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
+		if err != nil {
+			return nil, err
+		}
+		tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
+	}
+
+	rootConfig := &rootcerts.Config{
+		CAFile: tlsConfig.CAFile,
+		CAPath: tlsConfig.CAPath,
+	}
+	if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
+		return nil, err
+	}
+
+	return tlsClientConfig, nil
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+	config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+	// bootstrap the config
+	defConfig := DefaultConfig()
+
+	if len(config.Address) == 0 {
+		config.Address = defConfig.Address
+	}
+
+	if len(config.Scheme) == 0 {
+		config.Scheme = defConfig.Scheme
+	}
+
+	if config.Transport == nil {
+		config.Transport = defConfig.Transport
+	}
+
+	if config.TLSConfig.Address == "" {
+		config.TLSConfig.Address = defConfig.TLSConfig.Address
+	}
+
+	if config.TLSConfig.CAFile == "" {
+		config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
+	}
+
+	if config.TLSConfig.CAPath == "" {
+		config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
+	}
+
+	if config.TLSConfig.CertFile == "" {
+		config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
+	}
+
+	if config.TLSConfig.KeyFile == "" {
config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + config.Scheme = "http" + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = &http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + if config.Token == "" { + config.Token = defConfig.Token + } + + return &Client{config: *config}, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. +const serverError = "Unexpected response code: 500" + +// IsRetryableError returns true for 500 errors from the Consul servers, and +// network connection errors. These are usually retryable at a later time. +// This applies to reads but NOT to writes. This may return true for errors +// on writes that may have still gone through, so do not use this to retry +// any write operations. 
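+//
+// A hedged read-retry sketch:
+//
+//	nodes, _, err := client.Catalog().Nodes(nil)
+//	for err != nil && IsRetryableError(err) {
+//		time.Sleep(time.Second) // simplistic fixed backoff, for illustration only
+//		nodes, _, err = client.Catalog().Nodes(nil)
+//	}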
+func IsRetryableError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if _, ok := err.(net.Error); ok {
+		return true
+	}
+
+	// TODO (slackpad) - Make a real error type here instead of using
+	// a string check.
+	return strings.Contains(err.Error(), serverError)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+	if q.RelayFactor != 0 {
+		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+	}
+	r.ctx = q.ctx
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		b, err := encodeBody(r.obj)
+		if err != nil {
+			return nil, err
+		}
+		r.body = b
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.Host = r.url.Host
+	req.URL.Scheme = r.url.Scheme
+	req.Host = r.url.Host
+	req.Header = r.header
+
+	// Setup auth
+	if r.config.HttpAuth != nil {
+		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+	}
+	if r.ctx != nil {
+		return req.WithContext(r.ctx), nil
+	} else {
+		return req, nil
+	}
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+	r := &request{
+		config: &c.config,
+		method: method,
+		url: &url.URL{
+			Scheme: c.config.Scheme,
+			Host:   c.config.Address,
+			Path:   path,
+		},
+		params: make(map[string][]string),
+		header: make(http.Header),
+	}
+	if c.config.Datacenter != "" {
+		r.params.Set("dc", c.config.Datacenter)
+	}
+	if c.config.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(r.config.WaitTime))
+	}
+	if c.config.Token != "" {
+		r.header.Set("X-Consul-Token", r.config.Token)
+	}
+	return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+	req, err := r.toHTTP()
+	if err != nil {
+		return 0, nil, err
+	}
+	start := time.Now()
+	resp, err := c.config.HttpClient.Do(req)
+	diff := time.Since(start)
+	return diff, resp, err
+}
+
+// query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	r := c.newRequest("GET", endpoint)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if err := decodeBody(resp, out); err != nil {
+		return nil, err
+	}
+	return qm, nil
+}
+
+// write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
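+//
+// A hedged sketch of how an endpoint wrapper might call it (the endpoint
+// shown is hypothetical here):
+//
+//	var out struct{ ID string }
+//	wm, err := c.write("/v1/session/create", req, &out, q)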
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 00000000000..babfc9a1df4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,198 @@ +package api + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServicePort int + ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node 
string
+	Address    string // Obsolete.
+	Datacenter string
+	ServiceID  string
+	CheckID    string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+	c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+	return &Catalog{c}
+}
+
+// Register is used to register a new catalog entry
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/register")
+	r.setWriteOptions(q)
+	r.obj = reg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Deregister is used to remove a catalog entry
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+	r.setWriteOptions(q)
+	r.obj = dereg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Node
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/services")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CatalogService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+ var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 00000000000..90214e392ce --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,68 @@ +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Segment string + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap has the coordinates for servers in a given datacenter +// and area. Network coordinates are only compatible within the same area. +type CoordinateDatacenterMap struct { + Datacenter string + AreaID string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool. +func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/coordinate/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CoordinateEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 00000000000..85b5b069b03 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. 
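+//
+// A hypothetical usage sketch; "client" is assumed to be a configured *Client:
+//
+//	id, _, err := client.Event().Fire(&UserEvent{
+//		Name:    "deploy",
+//		Payload: []byte("v1.2.3"),
+//	}, nil)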
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 00000000000..38c105fdb93
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,200 @@
+package api
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	// HealthAny is special, and is used as a wild card,
+	// not as a specific state.
+	HealthAny      = "any"
+	HealthPassing  = "passing"
+	HealthWarning  = "warning"
+	HealthCritical = "critical"
+	HealthMaint    = "maintenance"
+)
+
+const (
+	// NodeMaint is the special key set by a node in maintenance mode.
+	NodeMaint = "_node_maintenance"
+
+	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
+	ServiceMaintPrefix = "_service_maintenance:"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+	ServiceTags []string
+}
+
+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+//  maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
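+//
+// A hypothetical usage sketch; "client" is assumed to be a configured *Client:
+//
+//	checks, _, err := client.Health().State(HealthCritical, nil)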
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	switch state {
+	case HealthAny:
+	case HealthWarning:
+	case HealthCritical:
+	case HealthPassing:
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 00000000000..97f51568559
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,420 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	// Key is the name of the key. It is also part of the URL path when accessed
+	// via the API.
+	Key string
+
+	// CreateIndex holds the index corresponding to the creation of this KVPair. This
+	// is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// LockIndex holds the index corresponding to a lock on this key, if any. This
+	// is a read-only field.
+	LockIndex uint64
+
+	// Flags are any user-defined flags on the key. It is up to the implementer
+	// to check these values, since Consul does not treat them specially.
+	Flags uint64
+
+	// Value is the value for the key. This can be any value, but it will be
+	// base64 encoded upon transport.
+	Value []byte
+
+	// Session is a string representing the ID of the session. Any other
+	// interactions with this key over the same session must specify the same
+	// session ID.
+	Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KVOp constants give possible operations available in a KVTxn.
+type KVOp string
+
+const (
+	KVSet            KVOp = "set"
+	KVDelete         KVOp = "delete"
+	KVDeleteCAS      KVOp = "delete-cas"
+	KVDeleteTree     KVOp = "delete-tree"
+	KVCAS            KVOp = "cas"
+	KVLock           KVOp = "lock"
+	KVUnlock         KVOp = "unlock"
+	KVGet            KVOp = "get"
+	KVGetTree        KVOp = "get-tree"
+	KVCheckSession   KVOp = "check-session"
+	KVCheckIndex     KVOp = "check-index"
+	KVCheckNotExists KVOp = "check-not-exists"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
+type KVTxnOp struct {
+	Verb    KVOp
+	Key     string
+	Value   []byte
+	Flags   uint64
+	Index   uint64
+	Session string
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+	Results []*KVPair
+	Errors  TxnErrors
+}
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
+
+// Get is used to lookup a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
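+//
+// A hypothetical usage sketch; "client" is assumed to be a configured *Client:
+//
+//	pair, _, err := client.KV().Get("web/config", nil)
+//	// pair is nil when the key is absent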
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
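+//
+// A hypothetical sketch of the acquire/release pairing; "client" and
+// "sessionID" are assumed to exist:
+//
+//	p := &KVPair{Key: "service/lock", Session: sessionID}
+//	acquired, _, err := client.KV().Acquire(p, nil)
+//	// ... critical section ...
+//	released, _, err := client.KV().Release(p, nil)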
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. +type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. 
Transactions are defined as a
+// list of operations to perform, using the KVOp constants and KVTxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store. Note that this hides the internal raw transaction interface
+// and munges the input and output types into KV-specific ones for ease of use.
+// If there are more non-KV operations in the future we may break out a new
+// transaction API client, but it will be easy to keep this KV-specific variant
+// supported.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+//	ops := KVTxnOps{
+//		&KVTxnOp{
+//			Verb:    KVLock,
+//			Key:     "test/lock",
+//			Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//			Value:   []byte("hello"),
+//		},
+//		&KVTxnOp{
+//			Verb: KVGet,
+//			Key:  "another/key",
+//		},
+//	}
+//	ok, response, _, err := kv.Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. Deleted keys will have a nil entry in the results, and to save
+// space, the Value of each key in the Results will be nil unless the operation
+// is a KVGet. If the transaction was rolled back, the Errors member will have
+// entries referencing the index of the operation that failed along with an error
+// message.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+	r := k.c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	// Convert into the internal format since this is an all-KV txn.
+	ops := make(TxnOps, 0, len(txn))
+	for _, kvOp := range txn {
+		ops = append(ops, &TxnOp{KV: kvOp})
+	}
+	r.obj = ops
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		// Convert from the internal format.
+		kvResp := KVTxnResponse{
+			Errors: txnResp.Errors,
+		}
+		for _, result := range txnResp.Results {
+			kvResp.Results = append(kvResp.Results, result.KV)
+		}
+		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 00000000000..41f72e7d23a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,385 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
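+//
+// A hypothetical sketch of the typical lock flow; "client" and "stopCh" are
+// assumed to exist:
+//
+//	lock, err := client.LockKey("service/leader")
+//	leaderCh, err := lock.Lock(stopCh)
+//	// ... hold leadership while leaderCh remains open ...
+//	err = lock.Unlock()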
+type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + s, err := l.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > qOpts.WaitTime {
+			return nil, nil
+		}
+
+		qOpts.WaitTime -= elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+	locked, _, err = kv.Acquire(pair, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, qOpts)
+		if pair != nil && pair.Session != "" {
+			// If the session is not empty, a wait can safely happen
+			// using a long poll
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	_, _, err := kv.Release(lockEnt, nil)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
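+//
+// A hypothetical sketch; "lock" is assumed to be an existing *Lock:
+//
+//	if err := lock.Destroy(); err == ErrLockInUse {
+//		// another session still holds the lock
+//	}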
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 00000000000..079e2248663 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go new file mode 100644 index 00000000000..a630b694cd5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -0,0 +1,193 @@ +// The /v1/operator/area endpoints are available only in Consul Enterprise and +// interact with its network area subsystem. Network areas are used to link +// together Consul servers in different Consul datacenters. With network areas, +// Consul datacenters can be linked together in ways other than a fully-connected +// mesh, as is required for Consul's WAN. +package api + +import ( + "net" + "time" +) + +// Area defines a network area. 
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the addresses of Consul servers to join, such as
+	// IPs or hostnames with an optional port number. This is optional.
+	RetryJoin []string
+
+	// UseTLS specifies whether gossip over this area should be encrypted with TLS
+	// if possible.
+	UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member. This will be negative if no RTT estimate
+	// is available.
+	RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("POST", "/v1/operator/area")
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaUpdate will update the configuration of the network area with the given ID.
+func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaGet returns a single network area.
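+//
+// A hypothetical usage sketch; "client" is assumed to be a configured *Client
+// and "areaID" an existing area UUID:
+//
+//	areas, _, err := client.Operator().AreaGet(areaID, nil)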
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. +func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. +func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { + var out []*SerfMember + qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go new file mode 100644 index 00000000000..b179406dc12 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -0,0 +1,219 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// AutopilotConfiguration is used for querying/setting the Autopilot configuration. +// Autopilot helps manage operator tasks related to Consul servers like removing +// failed servers from the Raft quorum. +type AutopilotConfiguration struct { + // CleanupDeadServers controls whether to remove dead servers from the Raft + // peer list when a new server joins + CleanupDeadServers bool + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold *ReadableDuration + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 + + // ServerStabilizationTime is the minimum amount of time a server must be + // in a stable, healthy state before it can be added to the cluster. Only + // applicable with Raft protocol version 3 or higher. + ServerStabilizationTime *ReadableDuration + + // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating + // servers into zones for redundancy. If left blank, this feature will be disabled. 
+	RedundancyZoneTag string
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration bool
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Consul version will be used.
+	UpgradeVersionTag string
+
+	// CreateIndex holds the index corresponding to the creation of this configuration.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex will be set to the index of the last update when retrieving the
+	// Autopilot configuration. Resubmitting a configuration with
+	// AutopilotCASConfiguration will perform a check-and-set operation which ensures
+	// there hasn't been a subsequent update since the configuration was retrieved.
+	ModifyIndex uint64
+}
+
+// ServerHealth is the health (from the leader's point of view) of a server.
+type ServerHealth struct {
+	// ID is the raft ID of the server.
+	ID string
+
+	// Name is the node name of the server.
+	Name string
+
+	// Address is the address of the server.
+	Address string
+
+	// The status of the SerfHealth check for the server.
+	SerfStatus string
+
+	// Version is the Consul version of the server.
+	Version string
+
+	// Leader is whether this server is currently the leader.
+	Leader bool
+
+	// LastContact is the time since this node's last contact with the leader.
+	LastContact *ReadableDuration
+
+	// LastTerm is the highest leader term this server has a record of in its Raft log.
+	LastTerm uint64
+
+	// LastIndex is the last log index this server has a record of in its Raft log.
+	LastIndex uint64
+
+	// Healthy is whether or not the server is healthy according to the current
+	// Autopilot config.
+	Healthy bool
+
+	// Voter is whether this is a voting server.
+	Voter bool
+
+	// StableSince is the last time this server's Healthy value changed.
+	StableSince time.Time
+}
+
+// OperatorHealthReply is a representation of the overall health of the cluster
+type OperatorHealthReply struct {
+	// Healthy is true if all the servers in the cluster are healthy.
+	Healthy bool
+
+	// FailureTolerance is the number of healthy servers that could be lost without
+	// an outage occurring.
+	FailureTolerance int
+
+	// Servers holds the health of each server.
+	Servers []ServerHealth
+}
+
+// ReadableDuration is a duration type that is serialized to JSON in human readable format.
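+// For example, ReadableDuration(1500*time.Millisecond) marshals to the JSON
+// string "1.5s".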
+type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + str := string(raw) + if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { + return fmt.Errorf("must be enclosed with quotes: %s", str) + } + dur, err := time.ParseDuration(str[1 : len(str)-1]) + if err != nil { + return err + } + *d = ReadableDuration(dur) + return nil +} + +// AutopilotGetConfiguration is used to query the current Autopilot configuration. +func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} + +// AutopilotSetConfiguration is used to set the current Autopilot configuration. +func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// AutopilotCASConfiguration is used to perform a Check-And-Set update on the +// Autopilot configuration. The ModifyIndex value will be respected. Returns +// true on success or false on failures. 
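+//
+// A hypothetical usage sketch; "op" is assumed to be a handle obtained from
+// client.Operator():
+//
+//	conf, err := op.AutopilotGetConfiguration(nil)
+//	conf.CleanupDeadServers = false
+//	ok, err := op.AutopilotCASConfiguration(conf, nil)
+//	// ok is false if ModifyIndex no longer matches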
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the
+// cluster.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out OperatorHealthReply
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 00000000000..6b614296cea
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,86 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// Segment has the network segment this request corresponds to.
+	Segment string
+
+	// A map of the encryption keys to the number of nodes they're installed on
+	Keys map[string]int
+
+	// The total number of nodes in this ring
+	NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+	r := op.c.newRequest("POST", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+	r := op.c.newRequest("GET", "/v1/operator/keyring")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*KeyringResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 00000000000..a9844df2dd3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,89 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Protocol version is the raft protocol version used by the server
+	ProtocolVersion string
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("address", string(address))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("id", string(id))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 00000000000..92b05d3c03b
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,11 @@
+package api
+
+// SegmentList returns all the available LAN segments.
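+//
+// A hypothetical usage sketch; "client" is assumed to be a configured *Client:
+//
+//	segments, _, err := client.Operator().SegmentList(nil)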
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
+	var out []string
+	qm, err := op.c.query("/v1/operator/segment", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 00000000000..ff210de3f00
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,198 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+	// NearestN is set to the number of remote datacenters to try, based on
+	// network coordinates.
+	NearestN int
+
+	// Datacenters is a fixed list of datacenters to try after NearestN. We
+	// never try a datacenter multiple times, so those are subtracted from
+	// this list before proceeding.
+	Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+	// TTL is the time to live for the served DNS results.
+	TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+	// Service is the service to query.
+	Service string
+
+	// Near allows baking in the name of a node to automatically distance-
+	// sort from. The magic "_agent" value is supported, which sorts near
+	// the agent which initiated the request by default.
+	Near string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryDatacenterOptions
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded).
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+
+	// NodeMeta is a map of required node metadata fields. If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	NodeMeta map[string]string
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+	// Type specifies the type of the query template. Currently only
+	// "name_prefix_match" is supported. This field is required.
+	Type string
+
+	// Regexp allows specifying a regex pattern to match against the name
+	// of the query being executed.
+	Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+	Session string
+
+	// Token is the ACL token used when the query was created, and it is
+	// used when a query is subsequently executed. This token, or a token
+	// with management privileges, must be used to change the query later.
+ Token string + + // Service defines a service query (leaving things open for other types + // later). + Service ServiceQuery + + // DNS has options that control how the results of this query are + // served over DNS. + DNS QueryDNSOptions + + // Template is used to pass through the arguments for creating a + // prepared query with an attached template. If a template is given, + // interpolations are possible in other struct fields. + Template QueryTemplate +} + +// PreparedQueryExecuteResponse has the results of executing a query. +type PreparedQueryExecuteResponse struct { + // Service is the service that was queried. + Service string + + // Nodes has the nodes that were output by the query. + Nodes []ServiceEntry + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int +} + +// PreparedQuery can be used to query the prepared query endpoints. +type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. +func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. 
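To tie the types above together, here is a hedged sketch that defines a prepared query with `Create` and runs it with the `Execute` helper implemented just below. The service name, failover settings, and destination are invented for illustration; they are not part of this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumes a local agent
	if err != nil {
		log.Fatal(err)
	}
	pq := client.PreparedQuery()

	// Define a query that finds healthy "web" instances, failing over
	// to the two nearest datacenters if the local one has none.
	def := &api.PreparedQueryDefinition{
		Name: "web-nearest",
		Service: api.ServiceQuery{
			Service:     "web",
			OnlyPassing: true,
			Failover:    api.QueryDatacenterOptions{NearestN: 2},
		},
	}
	id, _, err := pq.Create(def, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Execute by ID; the friendly name would work as well.
	res, _, err := pq.Execute(id, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d healthy nodes from %s\n", len(res.Nodes), res.Datacenter)
}
```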
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
+	var out *PreparedQueryExecuteResponse
+	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 00000000000..745a208c99d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+	c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+	return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 00000000000..d0c57417788
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,513 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix to
+	// use for coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps the session ID to true. It is used as a set effectively.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, interruption via the stopCh, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
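As a usage illustration of `Acquire` and the `Release` method implemented just below, a minimal sketch; the KV prefix, the limit of 2, and the shutdown handling are assumptions for the example, not part of the vendored change.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumes a local agent
	if err != nil {
		log.Fatal(err)
	}

	// Contend for one of two slots under the given KV prefix.
	sema, err := client.SemaphorePrefix("service/worker/sema", 2)
	if err != nil {
		log.Fatal(err)
	}

	// With a nil stopCh this blocks until a slot is acquired or an error occurs.
	lostCh, err := sema.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Do slot-holder work here, watching lostCh: it is closed if the
	// session is invalidated and the slot can no longer be assumed held.
	select {
	case <-lostCh:
		log.Println("lost the semaphore slot")
	default:
		// Release (implemented below) gives the slot up voluntarily.
		if err := sema.Release(); err != nil {
			log.Fatal(err)
		}
	}
}
```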
+func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to cleanup the semaphore entry. It is not necessary +// to invoke. It will fail if the semaphore is in use. +func (s *Semaphore) Destroy() error { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List for the semaphore + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an 
+// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. +func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 00000000000..1613f11a60c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,224 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
+ SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. 
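A short sketch of the intended pattern, pairing `Create` with the `RenewPeriodic` implementation that follows; the TTL value, session name, and shutdown sequencing are illustrative assumptions only. Note that this diff's own semaphore code passes a nil `*WriteOptions` to `RenewPeriodic` in the same way.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumes a local agent
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// Create a TTL-based session whose locks are deleted on invalidation.
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "my-service",
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the session alive until doneCh is closed; RenewPeriodic
	// (implemented below) also attempts a destroy on shutdown.
	doneCh := make(chan struct{})
	go func() {
		if err := session.RenewPeriodic("15s", id, nil, doneCh); err != nil {
			log.Printf("session renewal stopped: %v", err)
		}
	}()

	// ... do work tied to the session's lifetime, then:
	close(doneCh)
}
```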
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
+	ctx := q.Context()
+
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+
+	waitDur := ttl / 2
+	lastRenewTime := time.Now()
+	var lastErr error
+	for {
+		if time.Since(lastRenewTime) > ttl {
+			return lastErr
+		}
+		select {
+		case <-time.After(waitDur):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				waitDur = time.Second
+				lastErr = err
+				continue
+			}
+			if entry == nil {
+				return ErrSessionExpired
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+			waitDur = ttl / 2
+			lastRenewTime = time.Now()
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+
+		case <-ctx.Done():
+			// Bail immediately since attempting the destroy would
+			// use the canceled context in q, which would just bail.
+			return ctx.Err()
+		}
+	}
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets the sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/list", &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 00000000000..e902377dd5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,47 @@
+package api
+
+import (
+	"io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+	c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+	return &Snapshot{c}
+}
+
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/snapshot")
+	r.setQueryOptions(q)
+
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+	return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
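A hedged sketch of the save/restore round trip using `Save` above and the `Restore` method shown next; the snapshot file name and the back-to-back save/restore are arbitrary choices for the example.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumes a local agent
	if err != nil {
		log.Fatal(err)
	}
	snap := client.Snapshot()

	// Save streams the snapshot; the caller must close the reader.
	rc, _, err := snap.Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("consul.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}

	// Restore (shown next) streams a saved snapshot back in.
	backup, err := os.Open("consul.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer backup.Close()
	if err := snap.Restore(nil, backup); err != nil {
		log.Fatal(err)
	}
}
```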
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+	r := s.c.newRequest("PUT", "/v1/snapshot")
+	r.body = in
+	r.setWriteOptions(q)
+	_, _, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 00000000000..74ef61a678f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+// Peers is used to query for the known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
index 4a0b6a625d1..a9eafd61f06 100644
--- a/vendor/github.com/hashicorp/go-getter/README.md
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -21,8 +21,7 @@ URLs. For example: "github.com/hashicorp/go-getter" would turn into a
 Git URL. Or "./foo" would turn into a file URL. These are extensible.
 
 This library is used by [Terraform](https://terraform.io) for
-downloading modules, [Otto](https://ottoproject.io) for dependencies and
-Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+downloading modules and [Nomad](https://nomadproject.io) for downloading
 binaries.
 
 ## Installation and Usage
@@ -119,6 +118,37 @@ The protocol-specific options are documented below the URL format
 section. But because they are part of the URL, we point it out here so
 you know they exist.
 
+### Subdirectories
+
+If you want to download only a specific subdirectory from a downloaded
+directory, you can specify a subdirectory after a double-slash `//`.
+go-getter will first download the URL specified _before_ the double-slash
+(as if you didn't specify a double-slash), but will then copy the
+path after the double slash into the target directory.
+
+For example, if you're downloading this GitHub repository, but you only
+want to download the `test-fixtures` directory, you can do the following:
+
+```
+https://github.com/hashicorp/go-getter.git//test-fixtures
+```
+
+If you downloaded this to the `/tmp` directory, then the file
+`/tmp/archive.gz` would exist. Notice that this file is in the `test-fixtures`
+directory in this repository, but because we specified a subdirectory,
+go-getter automatically copied only that directory's contents.
+
+Subdirectory paths may also use filesystem glob patterns.
+The path must match _exactly one_ entry or go-getter will return an error.
+This is useful if you're not sure of the exact directory name but it follows
+a predictable naming structure.
+
+For example, the following URL would also work:
+
+```
+https://github.com/hashicorp/go-getter.git//test-*
+```
+
 ### Checksumming
 
 For file downloads of any protocol, go-getter can automatically verify
@@ -154,9 +184,11 @@ The following archive formats are supported:
 
   * `tar.gz` and `tgz`
   * `tar.bz2` and `tbz2`
+  * `tar.xz` and `txz`
   * `zip`
   * `gz`
   * `bz2`
+  * `xz`
 
 For example, an example URL is shown below:
 
@@ -222,13 +254,17 @@ None
 
 ### HTTP (`http`)
 
-None
+#### Basic Authentication
+
+To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
+hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
+characters, including the username and password, must be URL encoded.
 
 ### S3 (`s3`)
 
 S3 takes various access configurations in the URL. Note that it will also
-read these from standard AWS environment variables if they're set. If
-the query parameters are present, these take priority.
+read these from standard AWS environment variables if they're set. S3 compliant servers like Minio
+are also supported. If the query parameters are present, these take priority.
 
   * `aws_access_key_id` - AWS access key.
   * `aws_access_key_secret` - AWS access key secret.
@@ -240,6 +276,14 @@ If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
 using credentials, then just omit these and the profile, if available
 will be used automatically.
 
+### Using S3 with Minio
+If you use go-getter for Minio support, you must consider the following:
+
+ * `aws_access_key_id` (required) - Minio access key.
+ * `aws_access_key_secret` (required) - Minio access key secret.
+ * `region` (optional - defaults to us-east-1) - Region identifier to use.
+ * `version` (optional - defaults to Minio default) - Configuration file format.
+
 #### S3 Bucket Examples
 
 S3 has several addressing schemes used to reference your bucket.
These are
@@ -250,4 +294,5 @@ Some examples for these addressing schemes:
 
 - s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
 - bucket.s3.amazonaws.com/foo
 - bucket.s3-eu-west-1.amazonaws.com/foo/bar
+- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
index 159dad4dc25..ec48d45ec3d 100644
--- a/vendor/github.com/hashicorp/go-getter/appveyor.yml
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -1,5 +1,5 @@
 version: "build-{branch}-{build}"
-image: Visual Studio 2015
+image: Visual Studio 2017
 clone_folder: c:\gopath\github.com\hashicorp\go-getter
 environment:
   GOPATH: c:\gopath
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
index 876812a0a26..b67bb641c36 100644
--- a/vendor/github.com/hashicorp/go-getter/client.go
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -305,7 +305,13 @@ func (c *Client) Get() error {
 			return err
 		}
 
-		return copyDir(realDst, filepath.Join(dst, subDir), false)
+		// Process any globs
+		subDir, err := SubdirGlob(dst, subDir)
+		if err != nil {
+			return err
+		}
+
+		return copyDir(realDst, subDir, false)
 	}
 
 	return nil
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
index d18174cc3d3..fc5681d39fa 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -16,14 +16,18 @@ var Decompressors map[string]Decompressor
 func init() {
 	tbzDecompressor := new(TarBzip2Decompressor)
 	tgzDecompressor := new(TarGzipDecompressor)
+	txzDecompressor := new(TarXzDecompressor)
 
 	Decompressors = map[string]Decompressor{
 		"bz2":     new(Bzip2Decompressor),
 		"gz":      new(GzipDecompressor),
+		"xz":      new(XzDecompressor),
 		"tar.bz2": tbzDecompressor,
 		"tar.gz":  tgzDecompressor,
+		"tar.xz":  txzDecompressor,
 		"tbz2":    tbzDecompressor,
 		"tgz":     tgzDecompressor,
+		"txz":     txzDecompressor,
 		"zip":     new(ZipDecompressor),
 	}
 }
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
index 20010540eaf..5ebf709b4f9 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -9,7 +9,7 @@ import (
 )
 
 // GzipDecompressor is an implementation of Decompressor that can
-// decompress bz2 files.
+// decompress gzip files.
 type GzipDecompressor struct{}
 
 func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
new file mode 100644
index 00000000000..543c30d21f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
@@ -0,0 +1,112 @@
+package getter
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// untar is a shared helper for untarring an archive. The reader should provide
+// an uncompressed view of the tar archive.
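Putting the subdirectory-glob support from client.go together with go-getter's package-level `Get` helper, a minimal sketch; the repository URL, glob pattern, and destination directory are illustrative assumptions, not part of this diff.

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Download only the subdirectory matching "test-*" from the repo.
	// The part before "//" is fetched as usual; the glob after it is
	// resolved by SubdirGlob and must match exactly one entry.
	src := "git::https://github.com/hashicorp/go-getter.git//test-*"
	if err := getter.Get("./out", src); err != nil {
		log.Fatal(err)
	}
}
```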
+func untar(input io.Reader, dst, src string, dir bool) error { + tarR := tar.NewReader(input) + done := false + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + return nil + } + if err != nil { + return err + } + + if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { + // don't unpack extended headers as files + continue + } + + path := dst + if dir { + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + continue + } else { + // There is no ordering guarantee that a file in a directory is + // listed before the directory + dstPath := filepath.Dir(path) + + // Check that the directory exists, otherwise create it + if _, err := os.Stat(dstPath); os.IsNotExist(err) { + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + } + } + + // We have a file. If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + } +} + +// tarDecompressor is an implementation of Decompressor that can +// unpack tar files. +type tarDecompressor struct{} + +func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + return untar(f, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go index c46ed4453bb..5391b5c8c52 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -1,10 +1,7 @@ package getter import ( - "archive/tar" "compress/bzip2" - "fmt" - "io" "os" "path/filepath" ) @@ -32,64 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { // Bzip2 compression is second bzipR := bzip2.NewReader(f) - - // Once bzip decompressed we have a tar format - tarR := tar.NewReader(bzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. 
If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(bzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go index 686d6c2b64f..82b8ab4f6e8 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -11,7 +11,8 @@ import ( "runtime" "sort" "strings" - "testing" + + "github.com/mitchellh/go-testing-interface" ) // TestDecompressCase is a single test case for testing decompressors @@ -24,7 +25,7 @@ type TestDecompressCase struct { } // TestDecompressor is a helper function for testing generic decompressors. -func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) { +func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { for _, tc := range cases { t.Logf("Testing: %s", tc.Input) @@ -87,7 +88,7 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) } } -func testListDir(t *testing.T, path string) []string { +func testListDir(t testing.T, path string) []string { var result []string err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { if err != nil { @@ -102,7 +103,7 @@ func testListDir(t *testing.T, path string) []string { // If it is a dir, add trailing sep if info.IsDir() { - sub += "/" + sub += string(os.PathSeparator) } result = append(result, sub) @@ -116,7 +117,7 @@ func testListDir(t *testing.T, path string) []string { return result } -func testMD5(t *testing.T, path string) string { +func testMD5(t testing.T, path string) string { f, err := os.Open(path) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go index e8b1c31cacd..65eb70dd2c2 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -1,10 +1,8 @@ package getter import ( - "archive/tar" "compress/gzip" "fmt" - "io" "os" "path/filepath" ) @@ -37,63 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { } defer gzipR.Close() - // Once gzip decompressed we have a tar format - tarR := tar.NewReader(gzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. 
If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(gzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go new file mode 100644 index 00000000000..5e151c127df --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go @@ -0,0 +1,39 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// TarXzDecompressor is an implementation of Decompressor that can +// decompress tar.xz files. +type TarXzDecompressor struct{} + +func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + txzR, err := xz.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) + } + + return untar(txzR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go new file mode 100644 index 00000000000..4e37abab108 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// XzDecompressor is an implementation of Decompressor that can +// decompress xz files. +type XzDecompressor struct{} + +func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("xz-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + xzR, err := xz.NewReader(f) + if err != nil { + return err + } + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, xzR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go index 481b737c6a7..c3695510b34 100644 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -72,12 +72,18 @@ func Detect(src string, pwd string, ds []Detector) (string, error) { subDir = detectSubdir } } + if subDir != "" { u, err := url.Parse(result) if err != nil { return "", fmt.Errorf("Error parsing URL: %s", err) } u.Path += "//" + subDir + + // a subdir may contain wildcards, but in order to support them we + // have to ensure the path isn't escaped. 
+ u.RawPath = u.Path + result = u.String() } diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go index 756ea43f83a..4ef41ea73fa 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_file.go +++ b/vendor/github.com/hashicorp/go-getter/detect_file.go @@ -32,7 +32,7 @@ func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { return "", true, err } if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) + pwd, err = filepath.EvalSymlinks(pwd) if err != nil { return "", true, err } diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go index c3236f553e4..e6053d934a1 100644 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -18,6 +18,8 @@ import ( "os/exec" "regexp" "syscall" + + cleanhttp "github.com/hashicorp/go-cleanhttp" ) // Getter defines the interface that schemes must implement to download @@ -49,8 +51,13 @@ var Getters map[string]Getter // syntax is schema::url, example: git::https://foo.com var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) +// httpClient is the default client to be used by HttpGetters. +var httpClient = cleanhttp.DefaultClient() + func init() { - httpGetter := &HttpGetter{Netrc: true} + httpGetter := &HttpGetter{ + Netrc: true, + } Getters = map[string]Getter{ "file": new(FileGetter), diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index 07281398322..6f5d9142bcd 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -180,17 +180,34 @@ func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { // setupGitEnv sets up the environment for the given command. This is used to // pass configuration data to git and ssh and enables advanced cloning methods. func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { - var sshOpts []string + const gitSSHCommand = "GIT_SSH_COMMAND=" + var sshCmd []string + + // If we have an existing GIT_SSH_COMMAND, we need to append our options. + // We will also remove our old entry to make sure the behavior is the same + // with versions of Go < 1.9. + env := os.Environ() + for i, v := range env { + if strings.HasPrefix(v, gitSSHCommand) { + sshCmd = []string{v} + + env[i], env[len(env)-1] = env[len(env)-1], env[i] + env = env[:len(env)-1] + break + } + } + + if len(sshCmd) == 0 { + sshCmd = []string{gitSSHCommand + "ssh"} + } if sshKeyFile != "" { // We have an SSH key temp file configured, tell ssh about this. - sshOpts = append(sshOpts, "-i", sshKeyFile) + sshCmd = append(sshCmd, "-i", sshKeyFile) } - cmd.Env = append(os.Environ(), - // Set the ssh command to use for clones. - "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "), - ) + env = append(env, strings.Join(sshCmd, " ")) + cmd.Env = env } // checkGitVersion is used to check the version of git installed on the system diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 3c020343eea..9acc72cd720 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -36,6 +36,10 @@ type HttpGetter struct { // Netrc, if true, will lookup and use auth information found // in the user's netrc file if available. Netrc bool + + // Client is the http.Client to use for Get requests. 
+ // This defaults to a cleanhttp.DefaultClient if left unset. + Client *http.Client } func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { @@ -57,13 +61,17 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } } + if g.Client == nil { + g.Client = httpClient + } + // Add terraform-get to the parameter. q := u.Query() q.Add("terraform-get", "1") u.RawQuery = q.Encode() // Get the URL - resp, err := http.Get(u.String()) + resp, err := g.Client.Get(u.String()) if err != nil { return err } @@ -98,7 +106,18 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } func (g *HttpGetter) GetFile(dst string, u *url.URL) error { - resp, err := http.Get(u.String()) + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(u); err != nil { + return err + } + } + + if g.Client == nil { + g.Client = httpClient + } + + resp, err := g.Client.Get(u.String()) if err != nil { return err } @@ -132,13 +151,22 @@ func (g *HttpGetter) getSubdir(dst, source, subDir string) error { } defer os.RemoveAll(td) + // We have to create a subdirectory that doesn't exist for the file + // getter to work. + td = filepath.Join(td, "data") + // Download that into the given directory if err := Get(td, source); err != nil { return err } + // Process any globbing + sourcePath, err := SubdirGlob(td, subDir) + if err != nil { + return err + } + // Make sure the subdir path actually exists - sourcePath := filepath.Join(td, subDir) if _, err := os.Stat(sourcePath); err != nil { return fmt.Errorf( "Error downloading %s: %s", source, err) diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go index d3bffeb1737..ebb3217417d 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -28,7 +28,7 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { } // Create client config - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) @@ -84,7 +84,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { return err } - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) @@ -139,7 +139,7 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error { return err } - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) return g.getObject(client, dst, bucket, path, version) @@ -174,7 +174,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er return err } -func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config { +func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { conf := &aws.Config{} if creds == nil { // Grab the metadata URL @@ -195,6 +195,14 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * }) } + if creds != nil { + conf.Endpoint = &url.Host + conf.S3ForcePathStyle = aws.Bool(true) + if url.Scheme == "http" { + conf.DisableSSL = aws.Bool(true) + } + } + conf.Credentials = creds if region != "" { conf.Region = aws.String(region) @@ -204,29 +212,48 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * } func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { - // Expected host 
style: s3.amazonaws.com. They always have 3 parts,
-	// although the first may differ if we're accessing a specific region.
-	hostParts := strings.Split(u.Host, ".")
-	if len(hostParts) != 3 {
-		err = fmt.Errorf("URL is not a valid S3 URL")
-		return
-	}
+	// This just checks whether we are dealing with S3 or
+	// any other S3 compliant service. S3 has a predictable
+	// URL, whereas others do not.
+	if strings.Contains(u.Host, "amazonaws.com") {
+		// Expected host style: s3.amazonaws.com. They always have 3 parts,
+		// although the first may differ if we're accessing a specific region.
+		hostParts := strings.Split(u.Host, ".")
+		if len(hostParts) != 3 {
+			err = fmt.Errorf("URL is not a valid S3 URL")
+			return
+		}
 
-	// Parse the region out of the first part of the host
-	region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
-	if region == "" {
-		region = "us-east-1"
-	}
+		// Parse the region out of the first part of the host
+		region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
+		if region == "" {
+			region = "us-east-1"
+		}
 
-	pathParts := strings.SplitN(u.Path, "/", 3)
-	if len(pathParts) != 3 {
-		err = fmt.Errorf("URL is not a valid S3 URL")
-		return
-	}
+		pathParts := strings.SplitN(u.Path, "/", 3)
+		if len(pathParts) != 3 {
+			err = fmt.Errorf("URL is not a valid S3 URL")
+			return
+		}
+
+		bucket = pathParts[1]
+		path = pathParts[2]
+		version = u.Query().Get("version")
 
-	bucket = pathParts[1]
-	path = pathParts[2]
-	version = u.Query().Get("version")
+	} else {
+		pathParts := strings.SplitN(u.Path, "/", 3)
+		if len(pathParts) != 3 {
+			err = fmt.Errorf("URL is not a valid S3 compliant URL")
+			return
+		}
+		bucket = pathParts[1]
+		path = pathParts[2]
+		version = u.Query().Get("version")
+		region = u.Query().Get("region")
+		if region == "" {
+			region = "us-east-1"
+		}
+	}
 
 	_, hasAwsId := u.Query()["aws_access_key_id"]
 	_, hasAwsSecret := u.Query()["aws_access_key_secret"]
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
index 4d5ee3cce54..c63f2bbaf9e 100644
--- a/vendor/github.com/hashicorp/go-getter/source.go
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -1,6 +1,8 @@
 package getter
 
 import (
+	"fmt"
+	"path/filepath"
 	"strings"
 )
 
@@ -34,3 +36,27 @@ func SourceDirSubdir(src string) (string, string) {
 
 	return src, subdir
 }
+
+// SubdirGlob returns the actual subdir with globbing processed.
+//
+// dst should be a destination directory that is already populated (the
+// download is complete) and subDir should be the set subDir. If subDir
+// is an empty string, this returns an empty string.
+//
+// The returned path is the full absolute path.
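A small usage sketch of `SubdirGlob` (its implementation follows); the directory layout and pattern are assumed purely for illustration.

```go
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Assuming ./download already holds a completed download that
	// contains exactly one directory starting with "module-":
	path, err := getter.SubdirGlob("./download", "module-*")
	if err != nil {
		// Zero matches and multiple matches are both reported as errors.
		log.Fatal(err)
	}
	fmt.Println("resolved subdir:", path)
}
```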
+func SubdirGlob(dst, subDir string) (string, error) {
+	matches, err := filepath.Glob(filepath.Join(dst, subDir))
+	if err != nil {
+		return "", err
+	}
+
+	if len(matches) == 0 {
+		return "", fmt.Errorf("subdir %q not found", subDir)
+	}
+
+	if len(matches) > 1 {
+		return "", fmt.Errorf("subdir %q matches multiple paths", subDir)
+	}
+
+	return matches[0], nil
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
new file mode 100644
index 00000000000..abaf1e45f2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 HashiCorp
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
new file mode 100644
index 00000000000..614342b2d87
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/README.md
@@ -0,0 +1,123 @@
+# go-hclog
+
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
+
+`go-hclog` is a package for Go that provides a simple key/value logging
+interface for use in development and production environments.
+
+Unlike the standard library `log` package, it provides logging levels that
+can be used to decrease output as desired.
+
+It does not provide `Printf`-style logging, only key/value logging that is
+exposed as arguments to the logging functions for simplicity.
+
+It provides a human-readable output mode for use in development as well as
+a JSON output mode for production.
+
+## Stability Note
+
+While this library is fully open source and HashiCorp will be maintaining it
+(since we are and will be making extensive use of it), the API and output
+format are subject to minor changes as we fully bake and vet it in our projects.
+This notice will be removed once it's fully integrated into our major projects
+and no further changes are anticipated.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-hclog`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-hclog
+
+## Usage
+
+### Use the global logger
+
+```go
+hclog.Default().Info("hello world")
+```
+
+```text
+2017-07-05T16:15:55.167-0700 [INFO ] hello world
+```
+
+(Note that timestamps are removed in future examples for brevity.)
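+
+Editor's aside, not part of the upstream README: `global.go` in this same diff
+also adds `L()` as a short alias for `Default()`, so the call above can be
+written as:
+
+```go
+hclog.L().Info("hello world")
+```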
+
+### Create a new logger
+
+```go
+appLogger := hclog.New(&hclog.LoggerOptions{
+	Name:  "my-app",
+	Level: hclog.LevelFromString("DEBUG"),
+})
+```
+
+### Emit an Info level message with 2 key/value pairs
+
+```go
+input := "5.5"
+_, err := strconv.ParseInt(input, 10, 32)
+if err != nil {
+	appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
+}
+```
+
+```text
+... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
+```
+
+### Create a new Logger for a major subsystem
+
+```go
+subsystemLogger := appLogger.Named("transport")
+subsystemLogger.Info("we are transporting something")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting something
+```
+
+Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
+reflecting both the application and subsystem names.
+
+### Create a new Logger with fixed key/value pairs
+
+Using `With()` will include a specific key-value pair in all messages emitted
+by that logger.
+
+```go
+requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
+requestLogger := subsystemLogger.With("request", requestID)
+requestLogger.Info("we are transporting a request")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
+```
+
+This allows sub-loggers to be context-specific without having to thread that
+context into all the callers.
+
+### Use this with code that uses the standard library logger
+
+If you want to use the standard library's `log.Logger` interface you can wrap
+`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
+it with the familiar `Println()`, `Printf()`, etc. For example:
+
+```go
+stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
+	InferLevels: true,
+})
+// Printf() is provided by the stdlib log.Logger interface, not hclog.Logger
+stdLogger.Printf("[DEBUG] %+v", stdLogger)
+```
+
+```text
+... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
+```
+
+Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
+specify `InferLevels: true`, you will not see any output here. You must change
+`appLogger` to `DEBUG` to see output. See the docs for more information.
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
new file mode 100644
index 00000000000..55ce4396034
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/global.go
@@ -0,0 +1,34 @@
+package hclog
+
+import (
+	"sync"
+)
+
+var (
+	protect sync.Once
+	def     Logger
+
+	// The options used to create the Default logger. These are
+	// read only when the Default logger is created, so set them
+	// as soon as the process starts.
+	DefaultOptions = &LoggerOptions{
+		Level:  DefaultLevel,
+		Output: DefaultOutput,
+	}
+)
+
+// Return a logger that is held globally. This can be a good starting
+// place, and then you can use .With() and .Named() to create sub-loggers
+// to be used in more specific contexts.
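+//
+// Editor's sketch (hypothetical names; not part of the vendored change):
+//
+//	log := hclog.Default().Named("scheduler")
+//	log.Info("tick", "pending", 3)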
+func Default() Logger { + protect.Do(func() { + def = New(DefaultOptions) + }) + + return def +} + +// A short alias for Default() +func L() Logger { + return Default() +} diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go new file mode 100644 index 00000000000..20adcfbb93b --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/int.go @@ -0,0 +1,404 @@ +package hclog + +import ( + "bufio" + "encoding" + "encoding/json" + "fmt" + "log" + "os" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO ]", + Warn: "[WARN ]", + Error: "[ERROR]", + } +) + +// Given the options (nil for defaults), create a new Logger +func New(opts *LoggerOptions) Logger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = os.Stderr + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mtx := opts.Mutex + if mtx == nil { + mtx = new(sync.Mutex) + } + + return &intLogger{ + m: mtx, + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + w: bufio.NewWriter(output), + level: level, + } +} + +// The internal logger implementation. Internal in that it is defined entirely +// by this package. +type intLogger struct { + json bool + caller bool + name string + + // this is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + m *sync.Mutex + w *bufio.Writer + level Level + + implied []interface{} +} + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// The time format to use for logging. This is a version of RFC3339 that +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. +func (z *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < z.level { + return + } + + t := time.Now() + + z.m.Lock() + defer z.m.Unlock() + + if z.json { + z.logJson(t, level, msg, args...) + } else { + z.log(t, level, msg, args...) + } + + z.w.Flush() +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + + // Find the last separator. + // + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. 
+ idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + z.w.WriteString(t.Format(TimeFormat)) + z.w.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + z.w.WriteString(s) + } else { + z.w.WriteString("[UNKN ]") + } + + if z.caller { + if _, file, line, ok := runtime.Caller(3); ok { + z.w.WriteByte(' ') + z.w.WriteString(trimCallerPath(file)) + z.w.WriteByte(':') + z.w.WriteString(strconv.Itoa(line)) + z.w.WriteByte(':') + } + } + + z.w.WriteByte(' ') + + if z.name != "" { + z.w.WriteString(z.name) + z.w.WriteString(": ") + } + + z.w.WriteString(msg) + + args = append(z.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + z.w.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var val string + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + default: + val = fmt.Sprintf("%v", st) + } + + z.w.WriteByte(' ') + z.w.WriteString(args[i].(string)) + z.w.WriteByte('=') + + if strings.ContainsAny(val, " \t\n\r") { + z.w.WriteByte('"') + z.w.WriteString(val) + z.w.WriteByte('"') + } else { + z.w.WriteString(val) + } + } + } + + z.w.WriteString("\n") + + if stacktrace != "" { + z.w.WriteString(string(stacktrace)) + } +} + +// JSON logging function +func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if z.name != "" { + vals["@module"] = z.name + } + + if z.caller { + if _, file, line, ok := runtime.Caller(3); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + // Check if val is of type error. 
If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + if err, ok := val.(error); ok { + switch err.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = err.Error() + } + } + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(z.w).Encode(vals) + if err != nil { + panic(err) + } +} + +// Emit the message and args at DEBUG level +func (z *intLogger) Debug(msg string, args ...interface{}) { + z.Log(Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (z *intLogger) Trace(msg string, args ...interface{}) { + z.Log(Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (z *intLogger) Info(msg string, args ...interface{}) { + z.Log(Info, msg, args...) +} + +// Emit the message and args at WARN level +func (z *intLogger) Warn(msg string, args ...interface{}) { + z.Log(Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (z *intLogger) Error(msg string, args ...interface{}) { + z.Log(Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (z *intLogger) IsTrace() bool { + return z.level == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (z *intLogger) IsDebug() bool { + return z.level <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (z *intLogger) IsInfo() bool { + return z.level <= Info +} + +// Indicate that the logger would emit WARN level logs +func (z *intLogger) IsWarn() bool { + return z.level <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (z *intLogger) IsError() bool { + return z.level <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (z *intLogger) With(args ...interface{}) Logger { + var nz intLogger = *z + + nz.implied = append(nz.implied, args...) + + return &nz +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (z *intLogger) Named(name string) Logger { + var nz intLogger = *z + + if nz.name != "" { + nz.name = nz.name + "." + name + } else { + nz.name = name + } + + return &nz +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (z *intLogger) ResetNamed(name string) Logger { + var nz intLogger = *z + + nz.name = name + + return &nz +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. +func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0) +} diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go new file mode 100644 index 00000000000..dbc4198a5a4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/log.go @@ -0,0 +1,142 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" + "sync" +) + +var ( + DefaultOutput = os.Stderr + DefaultLevel = Info +) + +type Level int + +const ( + // This is a special level used to indicate that no level has been + // set and allow for a default to be used. 
+	NoLevel Level = 0
+
+	// The most verbose level. Intended to be used for the tracing of actions
+	// in code, such as function enters/exits, etc.
+	Trace Level = 1
+
+	// For programmer low-level analysis.
+	Debug Level = 2
+
+	// For information about steady state operations.
+	Info Level = 3
+
+	// For information about rare but handled events.
+	Warn Level = 4
+
+	// For information about unrecoverable events.
+	Error Level = 5
+)
+
+// LevelFromString returns a Level type for the named log level, or "NoLevel" if
+// the level string is invalid. This facilitates setting the log level via
+// config or environment variable by name in a predictable way.
+func LevelFromString(levelStr string) Level {
+	// We don't care about case. Accept "INFO" or "info"
+	levelStr = strings.ToLower(strings.TrimSpace(levelStr))
+	switch levelStr {
+	case "trace":
+		return Trace
+	case "debug":
+		return Debug
+	case "info":
+		return Info
+	case "warn":
+		return Warn
+	case "error":
+		return Error
+	default:
+		return NoLevel
+	}
+}
+
+// The main Logger interface. All code should be written against this interface only.
+type Logger interface {
+	// Args are alternating key, val pairs
+	// keys must be strings
+	// vals can be any type, but display is implementation specific
+	// Emit a message and key/value pairs at the TRACE level
+	Trace(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the DEBUG level
+	Debug(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the INFO level
+	Info(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the WARN level
+	Warn(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the ERROR level
+	Error(msg string, args ...interface{})
+
+	// Indicate if TRACE logs would be emitted. This and the other Is* guards
+	// are used to elide expensive logging code based on the current level.
+	IsTrace() bool
+
+	// Indicate if DEBUG logs would be emitted.
+	IsDebug() bool
+
+	// Indicate if INFO logs would be emitted.
+	IsInfo() bool
+
+	// Indicate if WARN logs would be emitted.
+	IsWarn() bool
+
+	// Indicate if ERROR logs would be emitted.
+	IsError() bool
+
+	// Creates a sublogger that will always have the given key/value pairs
+	With(args ...interface{}) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	Named(name string) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamed(name string) Logger
+
+	// Return a value that conforms to the stdlib log.Logger interface
+	StandardLogger(opts *StandardLoggerOptions) *log.Logger
+}
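+
+// Editor's sketch of the elision pattern the Is* guards enable; expensiveDump
+// is hypothetical and not part of the vendored change:
+//
+//	if logger.IsDebug() {
+//		logger.Debug("state dump", "state", expensiveDump())
+//	}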
+
+type StandardLoggerOptions struct {
+	// Indicate that some minimal parsing should be done on strings to try
+	// and detect their level and re-emit them. This supports strings like
+	// [ERROR], [ERR], [TRACE], [WARN], [INFO], and [DEBUG]; the tag is
+	// stripped off before the message is re-emitted at the detected level.
+	InferLevels bool
+}
+
+type LoggerOptions struct {
+	// Name of the subsystem to prefix logs with
+	Name string
+
+	// The threshold for the logger. Anything less severe is suppressed
+	Level Level
+
+	// Where to write the logs to. Defaults to os.Stderr if nil
+	Output io.Writer
+
+	// An optional mutex pointer in case Output is shared
+	Mutex *sync.Mutex
+
+	// Control if the output should be in JSON.
+	JSONFormat bool
+
+	// Include file and line information in each log line
+	IncludeLocation bool
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
new file mode 100644
index 00000000000..8af1a3be4c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package hclog
+
+import (
+	"bytes"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+var (
+	_stacktraceIgnorePrefixes = []string{
+		"runtime.goexit",
+		"runtime.main",
+	}
+	_stacktracePool = sync.Pool{
+		New: func() interface{} {
+			return newProgramCounters(64)
+		},
+	}
+)
+
+// A stacktrace gathered by a previous call to hclog.Stacktrace. If passed
+// to a logging function, the stacktrace will be appended.
+type CapturedStacktrace string
+
+// Gather a stacktrace of the current goroutine and return it to be passed
+// to a logging function.
+func Stacktrace() CapturedStacktrace {
+	return CapturedStacktrace(takeStacktrace())
+}
+
+func takeStacktrace() string {
+	programCounters := _stacktracePool.Get().(*programCounters)
+	defer _stacktracePool.Put(programCounters)
+
+	var buffer bytes.Buffer
+
+	for {
+		// Skip the call to runtime.Callers and takeStacktrace so that the
+		// program counters start at the caller of takeStacktrace.
+		n := runtime.Callers(2, programCounters.pcs)
+		if n < cap(programCounters.pcs) {
+			programCounters.pcs = programCounters.pcs[:n]
+			break
+		}
+		// Don't put the too-short counter slice back into the pool; this lets
+		// the pool adjust if we consistently take deep stacktraces.
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 00000000000..2bb927fc90c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + hl Logger + inferLevels bool +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case Trace: + s.hl.Trace(str) + case Debug: + s.hl.Debug(str) + case Info: + s.hl.Info(str) + case Warn: + s.hl.Warn(str) + case Error: + s.hl.Error(str) + default: + s.hl.Info(str) + } + } else { + s.hl.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is +func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index 2058cfb68d1..78d354ed23f 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -1,10 +1,9 @@ # Go Plugin System over RPC `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system -that has been in use by HashiCorp tooling for over 3 years. While initially -created for [Packer](https://www.packer.io), it has since been used by -[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io), -with plans to also use it for [Nomad](https://www.nomadproject.io) and +that has been in use by HashiCorp tooling for over 4 years. 
While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and [Vault](https://www.vaultproject.io). While the plugin system is over RPC, it is currently only designed to work @@ -24,6 +23,11 @@ interface as if it were going to run in the same process. For a plugin user: you just use and call functions on an interface as if it were in the same process. This plugin system handles the communication in between. +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + **Complex arguments and return values are supported.** This library provides APIs for handling complex arguments and return values such as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library @@ -37,7 +41,10 @@ and the plugin can call back into the host process. **Built-in Logging.** Any plugins that use the `log` standard library will have log data automatically sent to the host process. The host process will mirror this output prefixed with the path to the plugin -binary. This makes debugging with plugins simple. +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. **Protocol Versioning.** A very basic "protocol version" is supported that can be incremented to invalidate any previous plugins. This is useful when @@ -62,13 +69,18 @@ This requires the host/plugin to know this is possible and daemonize properly. `NewClient` takes a `ReattachConfig` to determine if and how to reattach. +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + ## Architecture The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc`). A single connection is made between -any plugin and the host process, and we use a -[connection multiplexing](https://github.com/hashicorp/yamux) -library to multiplex any other connections on top. +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. This architecture has a number of benefits: @@ -76,8 +88,8 @@ This architecture has a number of benefits: panic the plugin user. * Plugins are very easy to write: just write a Go application and `go build`. - Theoretically you could also use another language as long as it can - communicate the Go `net/rpc` protocol but this hasn't yet been tried. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. 
* Plugins are very easy to install: just put the binary in a location where
+  the host will find it (depends on the host but this library also provides
@@ -85,8 +97,8 @@ This architecture has a number of benefits:

 * Plugins can be relatively secure: The plugin only has access to the
   interfaces and args given to it, not to the entire memory space of the
-  process. More security features are planned (see the coming soon section
-  below).
+  process. Additionally, go-plugin can communicate with the plugin over
+  TLS.

 ## Usage

@@ -97,10 +109,9 @@ high-level steps that must be done. Examples are available in the

 1. Choose the interface(s) you want to expose for plugins.

 2. For each interface, implement an implementation of that interface
-   that communicates over an `*rpc.Client` (from the standard `net/rpc`
-   package) for every function call. Likewise, implement the RPC server
-   struct this communicates to which is then communicating to a real,
-   concrete implementation.
+   that communicates over a `net/rpc` connection, or over a
+   [gRPC](http://www.grpc.io) connection, or both. You'll have to implement
+   both a client and a server implementation.

 3. Create a `Plugin` implementation that knows how to create the RPC
    client/server for a given plugin type.
@@ -125,10 +136,6 @@ improvements we can make. At this point in time, the roadmap for the plugin
 system is:

-**Cryptographically Secure Plugins.** We'll implement signing plugins
-and loading signed plugins in order to allow Vault to make use of multi-process
-in a secure way.
-
 **Semantic Versioning.** Plugins will be able to implement a semantic version.
 This plugin system will give host processes a system for constraining
 versions. This is in addition to the protocol versioning already present
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
index 9f8a0f2765c..b912826b200 100644
--- a/vendor/github.com/hashicorp/go-plugin/client.go
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -2,8 +2,11 @@ package plugin

 import (
 	"bufio"
+	"crypto/subtle"
+	"crypto/tls"
 	"errors"
 	"fmt"
+	"hash"
 	"io"
 	"io/ioutil"
 	"log"
@@ -17,6 +20,8 @@ import (
 	"sync/atomic"
 	"time"
 	"unicode"
+
+	hclog "github.com/hashicorp/go-hclog"
 )

 // If this is 1, then we've called CleanupClients. This can be used
 var (
 	// ErrProcessNotFound is returned when a client is instantiated to
 	// reattach to an existing process and it isn't found.
 	ErrProcessNotFound = errors.New("Reattachment process not found")
+
+	// ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match
+	// the one provided in the SecureConfig.
+	ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+	// ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the
+	// SecureConfig.
+	ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+	// ErrSecureConfigNoHash is returned when a nil Hash object is provided to the
+	// SecureConfig.
+	ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+	// ErrSecureConfigAndReattach is returned when both Reattach and
+	// SecureConfig are set.
+	ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
 )
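+
+// Editor's sketch of how a host might wire the new fields together; the
+// handshake, plugin map, binary path, and checksum below are hypothetical:
+//
+//	client := plugin.NewClient(&plugin.ClientConfig{
+//		HandshakeConfig:  handshake,
+//		Plugins:          pluginMap,
+//		Cmd:              exec.Command("./terraform-provider-aws"),
+//		AllowedProtocols: []plugin.Protocol{plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
+//		SecureConfig:     &plugin.SecureConfig{Checksum: sum, Hash: sha256.New()},
+//	})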
 // Client handles the lifecycle of a plugin application. It launches
@@ -55,7 +76,9 @@ type Client struct {
 	l       sync.Mutex
 	address net.Addr
 	process *os.Process
-	client  *RPCClient
+	client   ClientProtocol
+	protocol Protocol
+	logger   hclog.Logger
 }

 // ClientConfig is the configuration used to initialize a new
@@ -79,6 +102,13 @@ type ClientConfig struct {
 	Cmd      *exec.Cmd
 	Reattach *ReattachConfig

+	// SecureConfig is configuration for verifying the integrity of the
+	// executable. It cannot be used with Reattach.
+	SecureConfig *SecureConfig
+
+	// TLSConfig is used to enable TLS on the RPC client.
+	TLSConfig *tls.Config
+
 	// Managed represents if the client should be managed by the
 	// plugin package or not. If true, then by calling CleanupClients,
 	// it will automatically be cleaned up. Otherwise, the client
@@ -109,14 +139,74 @@ type ClientConfig struct {
 	// sync any of these streams.
 	SyncStdout io.Writer
 	SyncStderr io.Writer
+
+	// AllowedProtocols is a list of allowed protocols. If this isn't set,
+	// then only netrpc is allowed. This is so that older go-plugin systems
+	// can show friendly errors if they see a plugin with an unknown
+	// protocol.
+	//
+	// By setting this, you can cause an error immediately on plugin start
+	// if an unsupported protocol is used, with a good error message.
+	//
+	// If this isn't set at all (nil value), then only net/rpc is accepted.
+	// This is done for legacy reasons. You must explicitly opt in to
+	// new protocols.
+	AllowedProtocols []Protocol
+
+	// Logger is the logger that the client will use. If none is provided,
+	// it will default to hclog's default logger.
+	Logger hclog.Logger
 }

 // ReattachConfig is used to configure a client to reattach to an
 // already-running plugin process. You can retrieve this information by
 // calling ReattachConfig on Client.
 type ReattachConfig struct {
-	Addr net.Addr
-	Pid  int
+	Protocol Protocol
+	Addr     net.Addr
+	Pid      int
+}
+
+// SecureConfig is used to configure a client to verify the integrity of an
+// executable before running. It does this by verifying that the checksum is
+// as expected. Hash is used to specify the hashing method to use when checksumming
+// the file. The configuration is verified by the client by calling the
+// SecureConfig.Check() function.
+//
+// The host process should ensure the checksum was provided by a trusted and
+// authoritative source. The binary should be installed in such a way that it
+// cannot be modified by an unauthorized user between the time of this check
+// and the time of execution.
+type SecureConfig struct {
+	Checksum []byte
+	Hash     hash.Hash
+}
+
+// Check takes the filepath to an executable and returns true if the checksum of
+// the file matches the checksum provided in the SecureConfig.
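+//
+// Editor's sketch (hypothetical checksum source and path; not part of the
+// vendored change):
+//
+//	sum, _ := hex.DecodeString(expectedSHA256) // expectedSHA256 from a trusted source
+//	sc := &plugin.SecureConfig{Checksum: sum, Hash: sha256.New()}
+//	ok, err := sc.Check("/usr/local/bin/my-plugin")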
+func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil } // This makes sure all the managed subprocesses are killed and properly @@ -174,7 +264,22 @@ func NewClient(config *ClientConfig) (c *Client) { config.SyncStderr = ioutil.Discard } - c = &Client{config: config} + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } if config.Managed { managedClientsLock.Lock() managedClients = append(managedClients, c) @@ -184,11 +289,11 @@ func NewClient(config *ClientConfig) (c *Client) { return } -// Client returns an RPC client for the plugin. +// Client returns the protocol client for this connection. // -// Subsequent calls to this will return the same RPC client. -func (c *Client) Client() (*RPCClient, error) { - addr, err := c.Start() +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() if err != nil { return nil, err } @@ -200,29 +305,18 @@ func (c *Client) Client() (*RPCClient, error) { return c.client, nil } - // Connect to the client - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) - // Create the actual RPC client - c.client, err = NewRPCClient(conn, c.config.Plugins) - if err != nil { - conn.Close() - return nil, err + case ProtocolGRPC: + c.client, err = newGRPCClient(c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) } - // Begin the stream syncing so that stdin, out, err work properly - err = c.client.SyncStreams( - c.config.SyncStdout, - c.config.SyncStderr) if err != nil { - c.client.Close() c.client = nil return nil, err } @@ -274,8 +368,7 @@ func (c *Client) Kill() { if err != nil { // If there was an error just log it. We're going to force // kill in a moment anyways. 
- log.Printf( - "[WARN] plugin: error closing client during Kill: %s", err) + c.logger.Warn("error closing client during Kill", "err", err) } } } @@ -318,9 +411,14 @@ func (c *Client) Start() (addr net.Addr, err error) { { cmdSet := c.config.Cmd != nil attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil if cmdSet == attachSet { return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } } // Create the logging channel for when we kill @@ -350,7 +448,7 @@ func (c *Client) Start() (addr net.Addr, err error) { pidWait(pid) // Log so we can see it - log.Printf("[DEBUG] plugin: reattached plugin process exited\n") + c.logger.Debug("reattached plugin process exited") // Mark it c.l.Lock() @@ -364,6 +462,11 @@ func (c *Client) Start() (addr net.Addr, err error) { // Set the address and process c.address = c.config.Reattach.Addr c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } return c.address, nil } @@ -384,7 +487,15 @@ func (c *Client) Start() (addr net.Addr, err error) { cmd.Stderr = stderr_w cmd.Stdout = stdout_w - log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) err = cmd.Start() if err != nil { return @@ -418,7 +529,7 @@ func (c *Client) Start() (addr net.Addr, err error) { cmd.Wait() // Log and make sure to flush the logs write away - log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) + c.logger.Debug("plugin process exited", "path", cmd.Path) os.Stderr.Sync() // Mark that we exited @@ -465,7 +576,7 @@ func (c *Client) Start() (addr net.Addr, err error) { timeout := time.After(c.config.StartTimeout) // Start looking for the address - log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) + c.logger.Debug("waiting for RPC address", "path", cmd.Path) select { case <-timeout: err = errors.New("timeout while waiting for plugin to start") @@ -475,7 +586,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // Trim the line and split by "|" in order to get the parts of // the output. line := strings.TrimSpace(string(lineBytes)) - parts := strings.SplitN(line, "|", 4) + parts := strings.SplitN(line, "|", 6) if len(parts) < 4 { err = fmt.Errorf( "Unrecognized remote plugin message: %s\n\n"+ @@ -525,6 +636,27 @@ func (c *Client) Start() (addr net.Addr, err error) { default: err = fmt.Errorf("Unknown address type: %s", parts[3]) } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. 
Supported: %v", + c.protocol, c.config.AllowedProtocols) + return + } + } c.address = addr @@ -555,9 +687,46 @@ func (c *Client) ReattachConfig() *ReattachConfig { } return &ReattachConfig{ - Addr: c.address, - Pid: c.config.Cmd.Process.Pid, + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. +func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil } func (c *Client) logStderr(r io.Reader) { @@ -566,9 +735,31 @@ func (c *Client) logStderr(r io.Reader) { line, err := bufR.ReadString('\n') if line != "" { c.config.Stderr.Write([]byte(line)) - line = strings.TrimRightFunc(line, unicode.IsSpace) - log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) + + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + l.Debug(line) + } else { + out := flattenKVPairs(entry.KVPairs) + + l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + } + } } if err == io.EOF { diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 00000000000..3bcf95efc56 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,83 @@ +package plugin + +import ( + "fmt" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(c *Client) (*GRPCClient, error) { + // Build dialing options. 
+ opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets + opts = append(opts, grpc.WithDialer(c.dialer)) + + // go-plugin expects to block the connection + opts = append(opts, grpc.WithBlock()) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if c.config.TLSConfig == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(c.config.TLSConfig))) + } + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + }, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.Conn) +} + +// ClientProtocol impl. +func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 00000000000..177a0cdd7dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,115 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. 
+	DoneCh chan struct{}
+
+	// Stdout/Stderr are the readers for stdout/stderr that will be copied
+	// to the stdout/stderr connection that is output.
+	Stdout io.Reader
+	Stderr io.Reader
+
+	config GRPCServerConfig
+	server *grpc.Server
+}
+
+// ServerProtocol impl.
+func (s *GRPCServer) Init() error {
+	// Create our server
+	var opts []grpc.ServerOption
+	if s.TLS != nil {
+		opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
+	}
+	s.server = s.Server(opts)
+
+	// Register the health service
+	healthCheck := health.NewServer()
+	healthCheck.SetServingStatus(
+		GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
+	grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
+
+	// Register all our plugins onto the gRPC server.
+	for k, raw := range s.Plugins {
+		p, ok := raw.(GRPCPlugin)
+		if !ok {
+			return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
+		}
+
+		if err := p.GRPCServer(s.server); err != nil {
+			return fmt.Errorf("error registering %q: %s", k, err)
+		}
+	}
+
+	return nil
+}
+
+// Config is the GRPCServerConfig encoded as JSON then base64.
+func (s *GRPCServer) Config() string {
+	// Create a buffer that will contain our final contents
+	var buf bytes.Buffer
+
+	// JSON-encode the config; the caller base64-encodes it for transport.
+	if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
+		// We panic since this shouldn't happen under any scenario. We
+		// carefully control the structure being encoded here and it should
+		// always be successful.
+		panic(err)
+	}
+
+	return buf.String()
+}
+
+func (s *GRPCServer) Serve(lis net.Listener) {
+	// Start serving in a goroutine
+	go s.server.Serve(lis)
+
+	// Wait until graceful completion
+	<-s.DoneCh
+}
+
+// GRPCServerConfig is the extra configuration passed along for consumers
+// to facilitate using GRPC plugins.
+type GRPCServerConfig struct {
+	StdoutAddr string `json:"stdout_addr"`
+	StderrAddr string `json:"stderr_addr"`
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
new file mode 100644
index 00000000000..2996c14c3cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go
@@ -0,0 +1,73 @@
+package plugin
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
+type logEntry struct {
+	Message   string        `json:"@message"`
+	Level     string        `json:"@level"`
+	Timestamp time.Time     `json:"timestamp"`
+	KVPairs   []*logEntryKV `json:"kv_pairs"`
+}
+
+// logEntryKV is a key value pair within the Output payload
+type logEntryKV struct {
+	Key   string      `json:"key"`
+	Value interface{} `json:"value"`
+}
+
+// flattenKVPairs is used to flatten KVPair slice into []interface{}
+// for hclog consumption.
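+//
+// Editor's sketch (hypothetical values; logger is a hypothetical hclog.Logger
+// and not part of the vendored change):
+//
+//	kvs := []*logEntryKV{{Key: "port", Value: 8080}}
+//	args := flattenKVPairs(kvs) // []interface{}{"port", 8080}
+//	logger.Info("plugin listening", args...)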
+func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input string) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal([]byte(input), &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. + for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go index 37c8fd653f9..6b7bdd1cfd0 100644 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -9,7 +9,10 @@ package plugin import ( + "errors" "net/rpc" + + "google.golang.org/grpc" ) // Plugin is the interface that is implemented to serve/connect to an @@ -23,3 +26,31 @@ type Plugin interface { // serving that communicates to the server end of the plugin. Client(*MuxBroker, *rpc.Client) (interface{}, error) } + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. + GRPCClient(*grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. +type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 00000000000..0cfc19e52d6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. 
+ Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go index 29f9bf063e7..f30a4b1d387 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -1,6 +1,7 @@ package plugin import ( + "crypto/tls" "fmt" "io" "net" @@ -19,6 +20,42 @@ type RPCClient struct { stdout, stderr net.Conn } +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + // NewRPCClient creates a client from an already-open connection-like value. // Dial is typically used instead. func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { @@ -121,3 +158,13 @@ func (c *RPCClient) Dispense(name string) (interface{}, error) { return p.Client(c.broker, rpc.NewClient(conn)) } + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 3984dc891ba..5bb18dd5db1 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -34,10 +34,14 @@ type RPCServer struct { lock sync.Mutex } -// Accept accepts connections on a listener and serves requests for -// each incoming connection. Accept blocks; the caller typically invokes -// it in a go statement. -func (s *RPCServer) Accept(lis net.Listener) { +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. 
+func (s *RPCServer) Serve(lis net.Listener) { for { conn, err := lis.Accept() if err != nil { @@ -122,6 +126,14 @@ type controlServer struct { server *RPCServer } +// Ping can be called to verify the connection (and likely the binary) +// is still alive to a plugin. +func (c *controlServer) Ping( + null bool, response *struct{}) error { + *response = struct{}{} + return nil +} + func (c *controlServer) Quit( null bool, response *struct{}) error { // End the server diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index b5c5270a7d8..e1543214a52 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,6 +1,8 @@ package plugin import ( + "crypto/tls" + "encoding/base64" "errors" "fmt" "io/ioutil" @@ -11,6 +13,10 @@ import ( "runtime" "strconv" "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" ) // CoreProtocolVersion is the ProtocolVersion of the plugin system itself. @@ -45,14 +51,37 @@ type ServeConfig struct { // HandshakeConfig is the configuration that must match clients. HandshakeConfig + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + // Plugins are the plugins that are served. Plugins map[string]Plugin + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server +} + +// Protocol returns the protocol that this server should speak. +func (c *ServeConfig) Protocol() Protocol { + result := ProtocolNetRPC + if c.GRPCServer != nil { + result = ProtocolGRPC + } + + return result } // Serve serves the plugins given by ServeConfig. // // Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to the log. +// errors will be outputted to os.Stderr. // // This is the method that plugins should call in their main() functions. func Serve(opts *ServeConfig) { @@ -77,6 +106,13 @@ func Serve(opts *ServeConfig) { // Logging goes to the original stderr log.SetOutput(os.Stderr) + // internal logger to os.Stderr + logger := hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + // Create our new stdout, stderr files. These will override our built-in // stdout/stderr so that it works across the stream boundary. stdout_r, stdout_w, err := os.Pipe() @@ -93,30 +129,86 @@ func Serve(opts *ServeConfig) { // Register a listener so we can accept a connection listener, err := serverListener() if err != nil { - log.Printf("[ERR] plugin: plugin init: %s", err) + logger.Error("plugin init error", "error", err) return } - defer listener.Close() + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. 
+ defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } // Create the channel to tell us when we're done doneCh := make(chan struct{}) - // Create the RPC server to dispense - server := &RPCServer{ - Plugins: opts.Plugins, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, + // Build the server type + var server ServerProtocol + switch opts.Protocol() { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. + if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: opts.Plugins, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: opts.Plugins, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + default: + panic("unknown server protocol: " + opts.Protocol()) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return } + // Build the extra configuration + extra := "" + if v := server.Config(); v != "" { + extra = base64.StdEncoding.EncodeToString([]byte(v)) + } + if extra != "" { + extra = "|" + extra + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + // Output the address and service name to stdout so that core can bring it up. - log.Printf("[DEBUG] plugin: plugin address: %s %s\n", - listener.Addr().Network(), listener.Addr().String()) - fmt.Printf("%d|%d|%s|%s\n", + fmt.Printf("%d|%d|%s|%s|%s%s\n", CoreProtocolVersion, opts.ProtocolVersion, listener.Addr().Network(), - listener.Addr().String()) + listener.Addr().String(), + opts.Protocol(), + extra) os.Stdout.Sync() // Eat the interrupts @@ -127,9 +219,7 @@ func Serve(opts *ServeConfig) { for { <-ch newCount := atomic.AddInt32(&count, 1) - log.Printf( - "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.", - newCount) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) } }() @@ -137,10 +227,8 @@ func Serve(opts *ServeConfig) { os.Stdout = stdout_w os.Stderr = stderr_w - // Serve - go server.Accept(listener) - - // Wait for the graceful exit + // Accept connections and wait for completion + go server.Serve(listener) <-doneCh } diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index 9086a1b45f6..c6bf7c4ed32 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -4,7 +4,9 @@ import ( "bytes" "net" "net/rpc" - "testing" + + "github.com/mitchellh/go-testing-interface" + "google.golang.org/grpc" ) // The testing file contains test helpers that you can use outside of @@ -12,7 +14,7 @@ import ( // TestConn is a helper function for returning a client and server // net.Conn connected to each other. -func TestConn(t *testing.T) (net.Conn, net.Conn) { +func TestConn(t testing.T) (net.Conn, net.Conn) { // Listen to any local port. This listener will be closed // after a single connection is established. 
l, err := net.Listen("tcp", "127.0.0.1:0") @@ -46,7 +48,7 @@ func TestConn(t *testing.T) (net.Conn, net.Conn) { } // TestRPCConn returns a rpc client and server connected to each other. -func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { clientConn, serverConn := TestConn(t) server := rpc.NewServer() @@ -58,7 +60,7 @@ func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { // TestPluginRPCConn returns a plugin RPC client and server that are connected // together and configured. -func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { +func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { // Create two net.Conns we can use to shuttle our control connection clientConn, serverConn := TestConn(t) @@ -74,3 +76,45 @@ func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServ return client, server } + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 00000000000..e87a115e462 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. 
"Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
new file mode 100644
index 00000000000..da17640e644
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
@@ -0,0 +1,11 @@
+default: test
+
+test:
+	go vet ./...
+	go test -race ./...
+
+updatedeps:
+	go get -f -t -u ./...
+	go get -f -u ./...
+
+.PHONY: default test updatedeps
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
new file mode 100644
index 00000000000..0d6f9ed40af
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md
@@ -0,0 +1,43 @@
+go-retryablehttp
+================
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
+[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
+
+The `retryablehttp` package provides a familiar HTTP client interface with
+automatic retries and exponential backoff. It is a thin wrapper over the
+standard `net/http` client library and exposes nearly the same public API. This
+makes `retryablehttp` very easy to drop into existing programs.
+
+`retryablehttp` performs automatic retries under certain conditions. Mainly, if
+an error is returned by the client (connection errors, etc.), or if a 500-range
+response code is received, then a retry is invoked after a wait period.
+Otherwise, the response is returned and left to the caller to interpret.
+
+The main difference from `net/http` is that requests which take a request body
+(POST/PUT et al.) require an `io.ReadSeeker` to be provided. This enables the
+request body to be "rewound" if the initial request fails so that the full
+request can be attempted again.
+
+Example Use
+===========
+
+Using this library should look almost identical to what you would do with
+`net/http`. The simplest example of a GET request is shown below:
+
+```go
+resp, err := retryablehttp.Get("/foo")
+if err != nil {
+    panic(err)
+}
+```
+
+The returned response object is an `*http.Response`, the same thing you would
+usually get from `net/http`. Had the request failed one or more times, the call
+above would have blocked and retried with exponential backoff.
+
+For more usage and examples see the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
new file mode 100644
index 00000000000..2ecd1ae8828
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -0,0 +1,311 @@
+// The retryablehttp package provides a familiar HTTP client interface with
+// automatic retries and exponential backoff. It is a thin wrapper over the
+// standard net/http client library and exposes nearly the same public API.
+// This makes retryablehttp very easy to drop into existing programs.
+//
+// retryablehttp performs automatic retries under certain conditions.
Mainly, if
+// an error is returned by the client (connection errors, etc.), or if a 500-range
+// response is received, then a retry is invoked. Otherwise, the response is
+// returned and left to the caller to interpret.
+//
+// The main difference from net/http is that requests which take a request body
+// (POST/PUT et al.) require an io.ReadSeeker to be provided. This enables the
+// request body to be "rewound" if the initial request fails so that the full
+// request can be attempted again.
+package retryablehttp
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+var (
+	// Default retry configuration
+	defaultRetryWaitMin = 1 * time.Second
+	defaultRetryWaitMax = 30 * time.Second
+	defaultRetryMax     = 4
+
+	// defaultClient is used for performing requests without explicitly making
+	// a new client. It is purposely private to avoid modifications.
+	defaultClient = NewClient()
+
+	// We need to consume response bodies to maintain HTTP connections, but
+	// limit the size we consume to respReadLimit.
+	respReadLimit = int64(4096)
+)
+
+// LenReader is an interface implemented by many in-memory io.Readers. Used
+// for automatically sending the right Content-Length header when possible.
+type LenReader interface {
+	Len() int
+}
+
+// Request wraps the metadata needed to create HTTP requests.
+type Request struct {
+	// body is a seekable reader over the request body payload. This is
+	// used to rewind the request data in between retries.
+	body io.ReadSeeker
+
+	// Embed an HTTP request directly. This makes a *Request act exactly
+	// like an *http.Request so that all meta methods are supported.
+	*http.Request
+}
+
+// NewRequest creates a new wrapped request.
+func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) {
+	// Wrap the body in a noop ReadCloser if non-nil. This prevents the
+	// reader from being closed by the HTTP client.
+	var rcBody io.ReadCloser
+	if body != nil {
+		rcBody = ioutil.NopCloser(body)
+	}
+
+	// Make the request with the noop-closer for the body.
+	httpReq, err := http.NewRequest(method, url, rcBody)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we can set the Content-Length automatically.
+	if lr, ok := body.(LenReader); ok {
+		httpReq.ContentLength = int64(lr.Len())
+	}
+
+	return &Request{body, httpReq}, nil
+}
+
+// RequestLogHook allows a function to run before each retry. The HTTP
+// request which will be made, and the retry number (0 for the initial
+// request) are available to users. The internal logger is exposed to
+// consumers.
+type RequestLogHook func(*log.Logger, *http.Request, int)
+
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(*log.Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckRetry callback to properly close any
+// response body before returning.
+type CheckRetry func(resp *http.Response, err error) (bool, error)
+
+// Backoff specifies a policy for how long to wait between retries.
+// It is called after a failing request to determine the amount of time
+// that should pass before trying again.
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
+
+// Client is used to make HTTP requests. It adds additional functionality
+// like automatic retries to tolerate minor outages.
+type Client struct {
+	HTTPClient *http.Client // Internal HTTP client.
+	Logger     *log.Logger  // Custom logger instance.
+
+	RetryWaitMin time.Duration // Minimum time to wait
+	RetryWaitMax time.Duration // Maximum time to wait
+	RetryMax     int           // Maximum number of retries
+
+	// RequestLogHook allows a user-supplied function to be called
+	// before each retry.
+	RequestLogHook RequestLogHook
+
+	// ResponseLogHook allows a user-supplied function to be called
+	// with the response from each HTTP request executed.
+	ResponseLogHook ResponseLogHook
+
+	// CheckRetry specifies the policy for handling retries, and is called
+	// after each request. The default policy is DefaultRetryPolicy.
+	CheckRetry CheckRetry
+
+	// Backoff specifies the policy for how long to wait between retries.
+	Backoff Backoff
+}
+
+// NewClient creates a new Client with default settings.
+func NewClient() *Client {
+	return &Client{
+		HTTPClient:   cleanhttp.DefaultClient(),
+		Logger:       log.New(os.Stderr, "", log.LstdFlags),
+		RetryWaitMin: defaultRetryWaitMin,
+		RetryWaitMax: defaultRetryWaitMax,
+		RetryMax:     defaultRetryMax,
+		CheckRetry:   DefaultRetryPolicy,
+		Backoff:      DefaultBackoff,
+	}
+}
+
+// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
+// will retry on connection errors and server errors.
+func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
+	if err != nil {
+		return true, err
+	}
+	// Check the response code. We retry on 500-range responses to allow
+	// the server time to recover, as 500s are typically not permanent
+	// errors and may relate to outages on the server side. This will catch
+	// invalid response codes as well, like 0 and 999.
+	if resp.StatusCode == 0 || resp.StatusCode >= 500 {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// DefaultBackoff provides a default callback for Client.Backoff which
+// will perform exponential backoff based on the attempt number and limited
+// by the provided minimum and maximum durations.
+func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+	mult := math.Pow(2, float64(attemptNum)) * float64(min)
+	sleep := time.Duration(mult)
+	if float64(sleep) != mult || sleep > max {
+		sleep = max
+	}
+	return sleep
+}
+
+// Do wraps calling an HTTP method with retries.
+func (c *Client) Do(req *Request) (*http.Response, error) {
+	c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
+
+	for i := 0; ; i++ {
+		var code int // HTTP response code
+
+		// Always rewind the request body when non-nil.
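+		// (Seeking back to offset 0 returns the reader to the start of the
+		// payload so the full body can be resent on this attempt; this is
+		// why request bodies must be io.ReadSeekers.)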
+		if req.body != nil {
+			if _, err := req.body.Seek(0, 0); err != nil {
+				return nil, fmt.Errorf("failed to seek body: %v", err)
+			}
+		}
+
+		if c.RequestLogHook != nil {
+			c.RequestLogHook(c.Logger, req.Request, i)
+		}
+
+		// Attempt the request
+		resp, err := c.HTTPClient.Do(req.Request)
+
+		// Record the response code, if any, for the retry log line below.
+		if resp != nil {
+			code = resp.StatusCode
+		}
+
+		// Check if we should continue with retries.
+		checkOK, checkErr := c.CheckRetry(resp, err)
+
+		if err != nil {
+			c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
+		} else {
+			// Call this here to maintain the behavior of logging all requests,
+			// even if CheckRetry signals to stop.
+			if c.ResponseLogHook != nil {
+				// Call the response logger function if provided.
+				c.ResponseLogHook(c.Logger, resp)
+			}
+		}
+
+		// Now decide if we should continue.
+		if !checkOK {
+			if checkErr != nil {
+				err = checkErr
+			}
+			return resp, err
+		}
+
+		// We're going to retry, consume any response to reuse the connection.
+		if err == nil {
+			c.drainBody(resp.Body)
+		}
+
+		remain := c.RetryMax - i
+		if remain == 0 {
+			break
+		}
+		wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
+		desc := fmt.Sprintf("%s %s", req.Method, req.URL)
+		if code > 0 {
+			desc = fmt.Sprintf("%s (status: %d)", desc, code)
+		}
+		c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
+		time.Sleep(wait)
+	}
+
+	// Return an error if we fall out of the retry loop
+	return nil, fmt.Errorf("%s %s giving up after %d attempts",
+		req.Method, req.URL, c.RetryMax+1)
+}
+
+// Try to read the response body so we can reuse this connection.
+func (c *Client) drainBody(body io.ReadCloser) {
+	defer body.Close()
+	_, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
+	if err != nil {
+		c.Logger.Printf("[ERR] error reading response body: %v", err)
+	}
+}
+
+// Get is a shortcut for doing a GET request without making a new client.
+func Get(url string) (*http.Response, error) {
+	return defaultClient.Get(url)
+}
+
+// Get is a convenience helper for doing simple GET requests.
+func (c *Client) Get(url string) (*http.Response, error) {
+	req, err := NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return c.Do(req)
+}
+
+// Head is a shortcut for doing a HEAD request without making a new client.
+func Head(url string) (*http.Response, error) {
+	return defaultClient.Head(url)
+}
+
+// Head is a convenience method for doing simple HEAD requests.
+func (c *Client) Head(url string) (*http.Response, error) {
+	req, err := NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return c.Do(req)
+}
+
+// Post is a shortcut for doing a POST request without making a new client.
+func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) {
+	return defaultClient.Post(url, bodyType, body)
+}
+
+// Post is a convenience method for doing simple POST requests.
+func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) {
+	req, err := NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return c.Do(req)
+}
+
+// PostForm is a shortcut to perform a POST with form data without creating
+// a new client.
+func PostForm(url string, data url.Values) (*http.Response, error) {
+	return defaultClient.PostForm(url, data)
+}
+
+// PostForm is a convenience method for doing simple POST operations using
+// pre-filled url.Values form data.
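+//
+// For example (hypothetical endpoint and form values, added editorially):
+//
+//	resp, err := client.PostForm("http://example.com/login",
+//		url.Values{"user": {"alice"}})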
+func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/github.com/hashicorp/hcl2/LICENSE b/vendor/github.com/hashicorp/hcl2/LICENSE new file mode 100644 index 00000000000..82b4de97c7e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go new file mode 100644 index 00000000000..3a149a8c2cb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go @@ -0,0 +1,304 @@ +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
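+//
+// A minimal illustrative sketch (the Config struct and file variable f are
+// hypothetical, added editorially; they are not part of this package):
+//
+//	type Config struct {
+//		IOMode string `hcl:"io_mode"`
+//	}
+//
+//	var c Config
+//	diags := DecodeBody(f.Body, nil, &c)
+//	if diags.HasErrors() {
+//		// handle the error diagnostics
+//	}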
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	rv := reflect.ValueOf(val)
+	if rv.Kind() != reflect.Ptr {
+		panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
+	}
+
+	return decodeBodyToValue(body, ctx, rv.Elem())
+}
+
+func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	et := val.Type()
+	switch et.Kind() {
+	case reflect.Struct:
+		return decodeBodyToStruct(body, ctx, val)
+	case reflect.Map:
+		return decodeBodyToMap(body, ctx, val)
+	default:
+		panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
+	}
+}
+
+func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+	schema, partial := ImpliedBodySchema(val.Interface())
+
+	var content *hcl.BodyContent
+	var leftovers hcl.Body
+	var diags hcl.Diagnostics
+	if partial {
+		content, leftovers, diags = body.PartialContent(schema)
+	} else {
+		content, diags = body.Content(schema)
+	}
+	if content == nil {
+		return diags
+	}
+
+	tags := getFieldTags(val.Type())
+
+	if tags.Remain != nil {
+		fieldIdx := *tags.Remain
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+		switch {
+		case bodyType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(leftovers))
+		case attrsType.AssignableTo(field.Type):
+			attrs, attrsDiags := leftovers.JustAttributes()
+			if len(attrsDiags) > 0 {
+				diags = append(diags, attrsDiags...)
+			}
+			fieldV.Set(reflect.ValueOf(attrs))
+		default:
+			diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
+		}
+	}
+
+	for name, fieldIdx := range tags.Attributes {
+		attr := content.Attributes[name]
+		field := val.Type().Field(fieldIdx)
+		fieldV := val.Field(fieldIdx)
+
+		if attr == nil {
+			if !exprType.AssignableTo(field.Type) {
+				continue
+			}
+
+			// As a special case, if the target is of type hcl.Expression then
+			// we'll assign an actual expression that evaluates to a cty null,
+			// so the caller can deal with it within the cty realm rather
+			// than within the Go realm.
+			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
+			fieldV.Set(reflect.ValueOf(synthExpr))
+			continue
+		}
+
+		switch {
+		case attrType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr))
+		case exprType.AssignableTo(field.Type):
+			fieldV.Set(reflect.ValueOf(attr.Expr))
+		default:
+			diags = append(diags, DecodeExpression(
+				attr.Expr, ctx, fieldV.Addr().Interface(),
+			)...)
+		}
+	}
+
+	blocksByType := content.Blocks.ByType()
+
+	for typeName, fieldIdx := range tags.Blocks {
+		blocks := blocksByType[typeName]
+		field := val.Type().Field(fieldIdx)
+
+		ty := field.Type
+		isSlice := false
+		isPtr := false
+		if ty.Kind() == reflect.Slice {
+			isSlice = true
+			ty = ty.Elem()
+		}
+		if ty.Kind() == reflect.Ptr {
+			isPtr = true
+			ty = ty.Elem()
+		}
+
+		if len(blocks) > 1 && !isSlice {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
+				Detail: fmt.Sprintf(
+					"Only one %s block is allowed.
Another was defined at %s.", + typeName, blocks[0].DefRange.String(), + ), + Subject: &blocks[1].DefRange, + }) + continue + } + + if len(blocks) == 0 { + if isSlice || isPtr { + val.Field(fieldIdx).Set(reflect.Zero(field.Type)) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", typeName), + Detail: fmt.Sprintf("A %s block is required.", typeName), + Subject: body.MissingItemRange().Ptr(), + }) + } + continue + } + + switch { + + case isSlice: + elemType := ty + if isPtr { + elemType = reflect.PtrTo(ty) + } + sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) + + for i, block := range blocks { + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + sli.Index(i).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) + } + } + + val.Field(fieldIdx).Set(sli) + + default: + block := blocks[0] + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + val.Field(fieldIdx).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + } + + } + + } + + return diags +} + +func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + attrs, diags := body.JustAttributes() + if attrs == nil { + return diags + } + + mv := reflect.MakeMap(v.Type()) + + for k, attr := range attrs { + switch { + case attrType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) + case exprType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) + default: + ev := reflect.New(v.Type().Elem()) + diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) + mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) + } + } + + v.Set(mv) + + return diags +} + +func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + var diags hcl.Diagnostics + + ty := v.Type() + + switch { + case blockType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block)) + case bodyType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block.Body)) + case attrsType.AssignableTo(ty): + attrs, attrsDiags := block.Body.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + v.Elem().Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) + + if len(block.Labels) > 0 { + blockTags := getFieldTags(ty) + for li, lv := range block.Labels { + lfieldIdx := blockTags.Labels[li].FieldIndex + v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + } + } + + } + + return diags +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	srcVal, diags := expr.Value(ctx)
+
+	convTy, err := gocty.ImpliedType(val)
+	if err != nil {
+		panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
+	}
+
+	srcVal, err = convert.Convert(srcVal, convTy)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+		return diags
+	}
+
+	err = gocty.FromCtyValue(srcVal, val)
+	if err != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Unsuitable value type",
+			Detail:   fmt.Sprintf("Unsuitable value: %s", err.Error()),
+			Subject:  expr.StartRange().Ptr(),
+			Context:  expr.Range().Ptr(),
+		})
+	}
+
+	return diags
+}
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
new file mode 100644
index 00000000000..8500214bf00
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
@@ -0,0 +1,49 @@
+// Package gohcl allows decoding HCL configurations into Go data structures.
+//
+// It provides a convenient and concise way of describing the schema for
+// configuration and then accessing the resulting data via native Go
+// types.
+//
+// A struct field tag scheme is used, similar to other decoding and
+// unmarshalling libraries. The tags are formatted as in the following example:
+//
+//     ThingType string `hcl:"thing_type,attr"`
+//
+// Within each tag there are two comma-separated tokens. The first is the
+// name of the corresponding construct in configuration, while the second
+// is a keyword giving the kind of construct expected. The following
+// kind keywords are supported:
+//
+//     attr (the default) indicates that the value is to be populated from an attribute
+//     block indicates that the value is to be populated from a block
+//     label indicates that the value is to be populated from a block label
+//     remain indicates that the value is to be populated from the remaining body after populating other fields
+//
+// "attr" fields may either be of type *hcl.Expression, in which case the raw
+// expression is assigned, or of any type accepted by gocty, in which case
+// gocty will be used to assign the value to a native Go type.
+//
+// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
+// corresponding raw value is assigned, or may be a struct that recursively
+// uses the same tags. Block fields may also be slices of any of these types,
+// in which case multiple blocks of the corresponding type are decoded into
+// the slice.
+//
+// "label" fields are considered only in a struct used as the type of a field
+// marked as "block", and are used sequentially to capture the labels of
+// the blocks being decoded. In this case, the name token is used only as
+// an identifier for the label in diagnostic messages.
+//
+// "remain" can be placed on a single field that may be either of type
+// hcl.Body or hcl.Attributes, in which case any remaining body content is
+// placed into this field for delayed processing. If no "remain" field is
+// present then any attributes or blocks not matched by another valid tag
+// will cause an error diagnostic.
+//
+// Broadly speaking, this package deals with two types of error. 
The first is +// errors in the configuration itself, which are returned as diagnostics +// written with the configuration author as the target audience. The second +// is bugs in the calling program, such as invalid struct tags, which are +// surfaced via panics since there can be no useful runtime handling of such +// errors and they should certainly not be returned to the user as diagnostics. +package gohcl diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go new file mode 100644 index 00000000000..a8955dcdc7b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go @@ -0,0 +1,167 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the +// given value, which must be a struct value or a pointer to one. If an +// inappropriate value is passed, this function will panic. +// +// The second return argument indicates whether the given struct includes +// a "remain" field, and thus the returned schema is non-exhaustive. +// +// This uses the tags on the fields of the struct to discover how each +// field's value should be expressed within configuration. If an invalid +// mapping is attempted, this function will panic. +func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { + ty := reflect.TypeOf(val) + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("given value must be struct, not %T", val)) + } + + var attrSchemas []hcl.AttributeSchema + var blockSchemas []hcl.BlockHeaderSchema + + tags := getFieldTags(ty) + + attrNames := make([]string, 0, len(tags.Attributes)) + for n := range tags.Attributes { + attrNames = append(attrNames, n) + } + sort.Strings(attrNames) + for _, n := range attrNames { + idx := tags.Attributes[n] + field := ty.Field(idx) + var required bool + + switch { + case field.Type.AssignableTo(exprType): + // If we're decoding to hcl.Expression then absense can be + // indicated via a null value, so we don't specify that + // the field is required during decoding. 
+ required = false + case field.Type.Kind() != reflect.Ptr: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/types.go b/vendor/github.com/hashicorp/hcl2/gohcl/types.go new file mode 100644 index 00000000000..a94f275adc5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/types.go @@ -0,0 +1,16 @@ +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl2/hcl" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go new file mode 100644 index 00000000000..6ecf7447f5f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go @@ -0,0 +1,103 @@ +package hcl + +import ( + "fmt" +) + +// DiagnosticSeverity represents the severity of a diagnostic. 
+type DiagnosticSeverity int
+
+const (
+	// DiagInvalid is the invalid zero value of DiagnosticSeverity
+	DiagInvalid DiagnosticSeverity = iota
+
+	// DiagError indicates that the problem reported by a diagnostic prevents
+	// further progress in parsing and/or evaluating the subject.
+	DiagError
+
+	// DiagWarning indicates that the problem reported by a diagnostic warrants
+	// user attention but does not prevent further progress. It is most
+	// commonly used for showing deprecation notices.
+	DiagWarning
+)
+
+// Diagnostic represents information to be presented to a user about an
+// error or anomaly in parsing or evaluating configuration.
+type Diagnostic struct {
+	Severity DiagnosticSeverity
+
+	// Summary and Detail contain the English-language description of the
+	// problem. Summary is a terse description of the general problem and
+	// Detail is a more elaborate, often multi-sentence description of
+	// the problem and what might be done to solve it.
+	Summary string
+	Detail  string
+	Subject *Range
+	Context *Range
+}
+
+// Diagnostics is a list of Diagnostic instances.
+type Diagnostics []*Diagnostic
+
+// error implementation, so that diagnostics can be returned via APIs
+// that normally deal in vanilla Go errors.
+//
+// This presents only minimal context about the error, for compatibility
+// with usual expectations about how errors will present as strings.
+func (d *Diagnostic) Error() string {
+	return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
+}
+
+// error implementation, so that sets of diagnostics can be returned via
+// APIs that normally deal in vanilla Go errors.
+func (d Diagnostics) Error() string {
+	count := len(d)
+	switch {
+	case count == 0:
+		return "no diagnostics"
+	case count == 1:
+		return d[0].Error()
+	default:
+		return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
+	}
+}
+
+// Append appends a new diagnostic to a Diagnostics and returns the whole
+// Diagnostics.
+//
+// This is provided as a convenience for returning from a function that
+// collects and then returns a set of diagnostics:
+//
+//     return nil, diags.Append(&hcl.Diagnostic{ ... })
+//
+// Note that this modifies the array underlying the diagnostics slice, so
+// must be used carefully within a single codepath. It is incorrect (and rude)
+// to extend a diagnostics created by a different subsystem.
+func (d Diagnostics) Append(diag *Diagnostic) Diagnostics {
+	return append(d, diag)
+}
+
+// Extend concatenates the given Diagnostics with the receiver and returns
+// the whole new Diagnostics.
+//
+// This is similar to Append but accepts multiple diagnostics to add. It has
+// all the same caveats and constraints.
+func (d Diagnostics) Extend(diags Diagnostics) Diagnostics {
+	return append(d, diags...)
+}
+
+// HasErrors returns true if the receiver contains any diagnostics of
+// severity DiagError.
+func (d Diagnostics) HasErrors() bool {
+	for _, diag := range d {
+		if diag.Severity == DiagError {
+			return true
+		}
+	}
+	return false
+}
+
+// A DiagnosticWriter emits diagnostics somehow. 
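+//
+// For example (a sketch; the files map is assumed to be supplied by the
+// caller, e.g. collected while parsing with the companion hclparse
+// package), the text implementation defined in this package can render
+// collected diagnostics to stderr:
+//
+//     wr := hcl.NewDiagnosticTextWriter(os.Stderr, files, 78, true)
+//     wr.WriteDiagnostics(diags)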
+type DiagnosticWriter interface { + WriteDiagnostic(*Diagnostic) error + WriteDiagnostics(Diagnostics) error +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go new file mode 100644 index 00000000000..9776f04de4b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -0,0 +1,163 @@ +package hcl + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + + wordwrap "github.com/mitchellh/go-wordwrap" +) + +type diagnosticTextWriter struct { + files map[string]*File + wr io.Writer + width uint + color bool +} + +// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics +// to the given writer as formatted text. +// +// It is designed to produce text appropriate to print in a monospaced font +// in a terminal of a particular width, or optionally with no width limit. +// +// The given width may be zero to disable word-wrapping of the detail text +// and truncation of source code snippets. +// +// If color is set to true, the output will include VT100 escape sequences to +// color-code the severity indicators. It is suggested to turn this off if +// the target writer is not a terminal. +func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter { + return &diagnosticTextWriter{ + files: files, + wr: wr, + width: width, + color: color, + } +} + +func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { + if diag == nil { + return errors.New("nil diagnostic") + } + + var colorCode, resetCode string + if w.color { + switch diag.Severity { + case DiagError: + colorCode = "\x1b[31m" + case DiagWarning: + colorCode = "\x1b[33m" + } + resetCode = "\x1b[0m" + } + + var severityStr string + switch diag.Severity { + case DiagError: + severityStr = "Error" + case DiagWarning: + severityStr = "Warning" + default: + // should never happen + severityStr = "???????" + } + + fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) + + if diag.Subject != nil { + + file := w.files[diag.Subject.Filename] + if file == nil || file.Bytes == nil { + fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) + } else { + src := file.Bytes + r := bytes.NewReader(src) + sc := bufio.NewScanner(r) + sc.Split(bufio.ScanLines) + + var startLine, endLine int + if diag.Context != nil { + startLine = diag.Context.Start.Line + endLine = diag.Context.End.Line + } else { + startLine = diag.Subject.Start.Line + endLine = diag.Subject.End.Line + } + + var contextLine string + if diag.Subject != nil { + contextLine = contextString(file, diag.Subject.Start.Byte) + if contextLine != "" { + contextLine = ", in " + contextLine + } + } + + li := 1 + var ls string + for sc.Scan() { + ls = sc.Text() + + if li == startLine { + break + } + li++ + } + + fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) + + // TODO: Generate markers for the specific characters that are in the Context and Subject ranges. + // For now, we just print out the lines. 
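+			// As a rough illustration (filename, ranges, and messages are
+			// hypothetical), the Fprintf calls in this method produce
+			// output shaped like:
+			//
+			//     Error: Unsupported block type
+			//
+			//      on main.tf line 12, in resource "example" "web":
+			//       12:   lifecycle {
+			//
+			//     Blocks of type "lifecycle" are not expected here.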
+ + fmt.Fprintf(w.wr, "%4d: %s\n", li, ls) + + if endLine > li { + for sc.Scan() { + ls = sc.Text() + li++ + + fmt.Fprintf(w.wr, "%4d: %s\n", li, ls) + + if li == endLine { + break + } + } + } + + w.wr.Write([]byte{'\n'}) + } + } + + if diag.Detail != "" { + detail := diag.Detail + if w.width != 0 { + detail = wordwrap.WrapString(detail, w.width) + } + fmt.Fprintf(w.wr, "%s\n\n", detail) + } + + return nil +} + +func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { + for _, diag := range diags { + err := w.WriteDiagnostic(diag) + if err != nil { + return err + } + } + return nil +} + +func contextString(file *File, offset int) string { + type contextStringer interface { + ContextString(offset int) string + } + + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go new file mode 100644 index 00000000000..c12833440ad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go @@ -0,0 +1,24 @@ +package hcl + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/doc.go new file mode 100644 index 00000000000..01318c96f87 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/doc.go @@ -0,0 +1 @@ +package hcl diff --git a/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go new file mode 100644 index 00000000000..915910ad8a5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go @@ -0,0 +1,25 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// An EvalContext provides the variables and functions that should be used +// to evaluate an expression. +type EvalContext struct { + Variables map[string]cty.Value + Functions map[string]function.Function + parent *EvalContext +} + +// NewChild returns a new EvalContext that is a child of the receiver. +func (ctx *EvalContext) NewChild() *EvalContext { + return &EvalContext{parent: ctx} +} + +// Parent returns the parent of the receiver, or nil if the receiver has +// no parent. 
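+//
+// As an illustrative sketch (variable names are hypothetical), expression
+// evaluation walks this parent chain when resolving variables and
+// functions, so a child context can introduce local names without
+// mutating its parent:
+//
+//     base := &hcl.EvalContext{
+//         Variables: map[string]cty.Value{"region": cty.StringVal("us-east-1")},
+//     }
+//     child := base.NewChild()
+//     child.Variables = map[string]cty.Value{"count": cty.NumberIntVal(3)}
+//     // child.Parent() == base; "region" still resolves via the parent.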
+func (ctx *EvalContext) Parent() *EvalContext { + return ctx.parent +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go new file mode 100644 index 00000000000..ccc1c0ae2c5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go @@ -0,0 +1,24 @@ +package hclsyntax + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go new file mode 100644 index 00000000000..8db11573faf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go @@ -0,0 +1,7 @@ +// Package hclsyntax contains the parser, AST, etc for zcl's native language, +// as opposed to the JSON variant. +// +// In normal use applications should rarely depend on this package directly, +// instead preferring the higher-level interface of the main hcl package and +// its companion hclparse. +package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go new file mode 100644 index 00000000000..e90ac2be6e7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go @@ -0,0 +1,1121 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// Expression is the abstract type for nodes that behave as zcl expressions. +type Expression interface { + Node + + // The hcl.Expression methods are duplicated here, rather than simply + // embedded, because both Node and hcl.Expression have a Range method + // and so they conflict. + + Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + Variables() []hcl.Traversal + StartRange() hcl.Range +} + +// Assert that Expression implements hcl.Expression +var assertExprImplExpr hcl.Expression = Expression(nil) + +// LiteralValueExpr is an expression that just always returns a given value. +type LiteralValueExpr struct { + Val cty.Value + SrcRange hcl.Range +} + +func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) { + // Literal values have no child nodes +} + +func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Val, nil +} + +func (e *LiteralValueExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *LiteralValueExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// ScopeTraversalExpr is an Expression that retrieves a value from the scope +// using a traversal. 
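+//
+// For example (illustrative), the source expression foo.bar[0] becomes a
+// ScopeTraversalExpr whose Traversal has the root name "foo" followed by
+// an attribute step for "bar" and an index step for 0.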
+type ScopeTraversalExpr struct {
+	Traversal hcl.Traversal
+	SrcRange  hcl.Range
+}
+
+func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) {
+	// Scope traversals have no child nodes
+}
+
+func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	return e.Traversal.TraverseAbs(ctx)
+}
+
+func (e *ScopeTraversalExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *ScopeTraversalExpr) StartRange() hcl.Range {
+	return e.SrcRange
+}
+
+// RelativeTraversalExpr is an Expression that retrieves a value from another
+// value using a _relative_ traversal.
+type RelativeTraversalExpr struct {
+	Source    Expression
+	Traversal hcl.Traversal
+	SrcRange  hcl.Range
+}
+
+func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) {
+	// Relative traversals have one child node: their source expression.
+	e.Source = w(e.Source).(Expression)
+}
+
+func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	src, diags := e.Source.Value(ctx)
+	ret, travDiags := e.Traversal.TraverseRel(src)
+	diags = append(diags, travDiags...)
+	return ret, diags
+}
+
+func (e *RelativeTraversalExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *RelativeTraversalExpr) StartRange() hcl.Range {
+	return e.SrcRange
+}
+
+// FunctionCallExpr is an Expression that calls a function from the EvalContext
+// and returns its result.
+type FunctionCallExpr struct {
+	Name string
+	Args []Expression
+
+	// If true, the final argument should be a tuple, list or set which will
+	// expand to be one argument per element.
+	ExpandFinal bool
+
+	NameRange       hcl.Range
+	OpenParenRange  hcl.Range
+	CloseParenRange hcl.Range
+}
+
+func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) {
+	for i, arg := range e.Args {
+		e.Args[i] = w(arg).(Expression)
+	}
+}
+
+func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	var f function.Function
+	exists := false
+	hasNonNilMap := false
+	thisCtx := ctx
+	for thisCtx != nil {
+		if thisCtx.Functions == nil {
+			thisCtx = thisCtx.Parent()
+			continue
+		}
+		hasNonNilMap = true
+		f, exists = thisCtx.Functions[e.Name]
+		if exists {
+			break
+		}
+		thisCtx = thisCtx.Parent()
+	}
+
+	if !exists {
+		if !hasNonNilMap {
+			return cty.DynamicVal, hcl.Diagnostics{
+				{
+					Severity: hcl.DiagError,
+					Summary:  "Function calls not allowed",
+					Detail:   "Functions may not be called here.",
+					Subject:  e.Range().Ptr(),
+				},
+			}
+		}
+
+		avail := make([]string, 0, len(ctx.Functions))
+		for name := range ctx.Functions {
+			avail = append(avail, name)
+		}
+		suggestion := nameSuggestion(e.Name, avail)
+		if suggestion != "" {
+			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+		}
+
+		return cty.DynamicVal, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Call to unknown function",
+				Detail:   fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion),
+				Subject:  &e.NameRange,
+				Context:  e.Range().Ptr(),
+			},
+		}
+	}
+
+	params := f.Params()
+	varParam := f.VarParam()
+
+	args := e.Args
+	if e.ExpandFinal {
+		if len(args) < 1 {
+			// should never happen if the parser is behaving
+			panic("ExpandFinal set on function call with no arguments")
+		}
+		expandExpr := args[len(args)-1]
+		expandVal, expandDiags := expandExpr.Value(ctx)
+		diags = append(diags, expandDiags...)
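+		// The expansion handled below corresponds to call syntax such as
+		// f(args...) (the function name is illustrative), where the final
+		// argument evaluates to a tuple, list, or set; each of its elements
+		// becomes one synthetic literal argument to the function.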
+ if expandDiags.HasErrors() { + return cty.DynamicVal, diags + } + + switch { + case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Context: expandExpr.Range().Ptr(), + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + if !expandVal.IsKnown() { + return cty.DynamicVal, diags + } + + newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) + newArgs = append(newArgs, args[:len(args)-1]...) + it := expandVal.ElementIterator() + for it.Next() { + _, val := it.Element() + newArgs = append(newArgs, &LiteralValueExpr{ + Val: val, + SrcRange: expandExpr.Range(), + }) + } + args = newArgs + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", + Context: expandExpr.Range().Ptr(), + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + } + + if len(args) < len(params) { + missing := params[len(args)] + qual := "" + if varParam != nil { + qual = " at least" + } + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Not enough function arguments", + Detail: fmt.Sprintf( + "Function %q expects%s %d argument(s). Missing value for %q.", + e.Name, qual, len(params), missing.Name, + ), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + }, + } + } + + if varParam == nil && len(args) > len(params) { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Too many function arguments", + Detail: fmt.Sprintf( + "Function %q expects only %d argument(s).", + e.Name, len(params), + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + }, + } + } + + argVals := make([]cty.Value, len(args)) + + for i, argExpr := range args { + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + + val, argDiags := argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } + + // Try to convert our value to the parameter type + val, err := convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + } + + argVals[i] = val + } + + if diags.HasErrors() { + // Don't try to execute the function if we already have errors with + // the arguments, because the result will probably be a confusing + // error message. + return cty.DynamicVal, diags + } + + resultVal, err := f.Call(argVals) + if err != nil { + switch terr := err.(type) { + case function.ArgError: + i := terr.Index + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + } + + return cty.DynamicVal, diags + } + + return resultVal, diags +} + +func (e *FunctionCallExpr) Range() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.CloseParenRange) +} + +func (e *FunctionCallExpr) StartRange() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.OpenParenRange) +} + +type ConditionalExpr struct { + Condition Expression + TrueResult Expression + FalseResult Expression + + SrcRange hcl.Range +} + +func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { + e.Condition = w(e.Condition).(Expression) + e.TrueResult = w(e.TrueResult).(Expression) + e.FalseResult = w(e.FalseResult).(Expression) +} + +func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + trueResult, trueDiags := e.TrueResult.Value(ctx) + falseResult, falseDiags := e.FalseResult.Value(ctx) + var diags hcl.Diagnostics + + // Try to find a type that both results can be converted to. + resultType, convs := convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) + if resultType == cty.NilType { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + // FIXME: Need a helper function for showing natural-language type diffs, + // since this will generate some useless messages in some cases, like + // "These expressions are object and object respectively" if the + // object types don't exactly match. + "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", + trueResult.Type(), falseResult.Type(), + ), + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + }, + } + } + + condResult, condDiags := e.Condition.Value(ctx) + diags = append(diags, condDiags...) + if condResult.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.UnknownVal(resultType), diags + } + if !condResult.IsKnown() { + return cty.UnknownVal(resultType), diags + } + condResult, err := convert.Convert(condResult, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.UnknownVal(resultType), diags + } + + if condResult.True() { + diags = append(diags, trueDiags...) 
+		if convs[0] != nil {
+			var err error
+			trueResult, err = convs[0](trueResult)
+			if err != nil {
+				// Unsafe conversion failed with the concrete result value
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Inconsistent conditional result types",
+					Detail: fmt.Sprintf(
+						"The true result value has the wrong type: %s.",
+						err.Error(),
+					),
+					Subject: e.TrueResult.Range().Ptr(),
+					Context: &e.SrcRange,
+				})
+				trueResult = cty.UnknownVal(resultType)
+			}
+		}
+		return trueResult, diags
+	} else {
+		diags = append(diags, falseDiags...)
+		if convs[1] != nil {
+			var err error
+			falseResult, err = convs[1](falseResult)
+			if err != nil {
+				// Unsafe conversion failed with the concrete result value
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Inconsistent conditional result types",
+					Detail: fmt.Sprintf(
+						"The false result value has the wrong type: %s.",
+						err.Error(),
+					),
+					Subject: e.FalseResult.Range().Ptr(),
+					Context: &e.SrcRange,
+				})
+				falseResult = cty.UnknownVal(resultType)
+			}
+		}
+		return falseResult, diags
+	}
+}
+
+func (e *ConditionalExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *ConditionalExpr) StartRange() hcl.Range {
+	return e.Condition.StartRange()
+}
+
+type IndexExpr struct {
+	Collection Expression
+	Key        Expression
+
+	SrcRange  hcl.Range
+	OpenRange hcl.Range
+}
+
+func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
+	e.Collection = w(e.Collection).(Expression)
+	e.Key = w(e.Key).(Expression)
+}
+
+func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	coll, collDiags := e.Collection.Value(ctx)
+	key, keyDiags := e.Key.Value(ctx)
+	diags = append(diags, collDiags...)
+	diags = append(diags, keyDiags...)
+
+	// Combine the index operation's own diagnostics with those already
+	// collected for the collection and key expressions.
+	val, indexDiags := hcl.Index(coll, key, &e.SrcRange)
+	diags = append(diags, indexDiags...)
+	return val, diags
+}
+
+func (e *IndexExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *IndexExpr) StartRange() hcl.Range {
+	return e.OpenRange
+}
+
+type TupleConsExpr struct {
+	Exprs []Expression
+
+	SrcRange  hcl.Range
+	OpenRange hcl.Range
+}
+
+func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
+	for i, expr := range e.Exprs {
+		e.Exprs[i] = w(expr).(Expression)
+	}
+}
+
+func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var vals []cty.Value
+	var diags hcl.Diagnostics
+
+	vals = make([]cty.Value, len(e.Exprs))
+	for i, expr := range e.Exprs {
+		val, valDiags := expr.Value(ctx)
+		vals[i] = val
+		diags = append(diags, valDiags...)
+	}
+
+	return cty.TupleVal(vals), diags
+}
+
+func (e *TupleConsExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *TupleConsExpr) StartRange() hcl.Range {
+	return e.OpenRange
+}
+
+type ObjectConsExpr struct {
+	Items []ObjectConsItem
+
+	SrcRange  hcl.Range
+	OpenRange hcl.Range
+}
+
+type ObjectConsItem struct {
+	KeyExpr   Expression
+	ValueExpr Expression
+}
+
+func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
+	for i, item := range e.Items {
+		e.Items[i].KeyExpr = w(item.KeyExpr).(Expression)
+		e.Items[i].ValueExpr = w(item.ValueExpr).(Expression)
+	}
+}
+
+func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var vals map[string]cty.Value
+	var diags hcl.Diagnostics
+
+	// known will be set to false if we fail to produce any of our keys,
+	// either because they are actually unknown or if the evaluation produces
+	// errors. 
In all of these case we must return DynamicPseudoType because + // we're unable to know the full set of keys our object has, and thus + // we can't produce a complete value of the intended type. + // + // We still evaluate all of the item keys and values to make sure that we + // get as complete as possible a set of diagnostics. + known := true + + vals = make(map[string]cty.Value, len(e.Items)) + for _, item := range e.Items { + key, keyDiags := item.KeyExpr.Value(ctx) + diags = append(diags, keyDiags...) + + val, valDiags := item.ValueExpr.Value(ctx) + diags = append(diags, valDiags...) + + if keyDiags.HasErrors() { + known = false + continue + } + + if key.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + }) + known = false + continue + } + + var err error + key, err = convert.Convert(key, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.ValueExpr.Range().Ptr(), + }) + known = false + continue + } + + if !key.IsKnown() { + known = false + continue + } + + keyStr := key.AsString() + + vals[keyStr] = val + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.ObjectVal(vals), diags +} + +func (e *ObjectConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ObjectConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// ForExpr represents iteration constructs: +// +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key: v...} +type ForExpr struct { + KeyVar string // empty if ignoring the key + ValVar string + + CollExpr Expression + + KeyExpr Expression // nil when producing a tuple + ValExpr Expression + CondExpr Expression // null if no "if" clause is present + + Group bool // set if the ellipsis is used on the value in an object for + + SrcRange hcl.Range + OpenRange hcl.Range + CloseRange hcl.Range +} + +func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + collVal, collDiags := e.CollExpr.Value(ctx) + diags = append(diags, collDiags...) + + if collVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if collVal.Type() == cty.DynamicPseudoType { + return cty.DynamicVal, diags + } + if !collVal.CanIterateElements() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over non-iterable value", + Detail: fmt.Sprintf( + "A value of type %s cannot be used as the collection in a 'for' expression.", + collVal.Type().FriendlyName(), + ), + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if !collVal.IsKnown() { + return cty.DynamicVal, diags + } + + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + + // Before we start we'll do an early check to see if any CondExpr we've + // been given is of the wrong type. 
This isn't 100% reliable (it may + // be DynamicVal until real values are given) but it should catch some + // straightforward cases and prevent a barrage of repeated errors. + if e.CondExpr != nil { + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = cty.DynamicVal + } + childCtx.Variables[e.ValVar] = cty.DynamicVal + + result, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if result.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + _, err := convert.Convert(result, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if condDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + if e.KeyExpr != nil { + // Producing an object + var vals map[string]cty.Value + var groupVals map[string][]cty.Value + if e.Group { + groupVals = map[string][]cty.Value{} + } else { + vals = map[string]cty.Value{} + } + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !include.IsKnown() { + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + keyRaw, keyDiags := e.KeyExpr.Value(childCtx) + diags = append(diags, keyDiags...) + if keyRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !keyRaw.IsKnown() { + known = false + continue + } + + key, err := convert.Convert(keyRaw, cty.String) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) 
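+			// The grouping mode below corresponds to the object_of_tuples
+			// example in the ForExpr doc comment: with the trailing ellipsis
+			// on the value, items that share a key are gathered into a tuple
+			// rather than triggering the duplicate-key diagnostic.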
+ + if e.Group { + k := key.AsString() + groupVals[k] = append(groupVals[k], val) + } else { + k := key.AsString() + if _, exists := vals[k]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object key", + Detail: fmt.Sprintf( + "Two different items produced the key %q in this for expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", + k, + ), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } else { + vals[key.AsString()] = val + } + } + } + + if !known { + return cty.DynamicVal, diags + } + + if e.Group { + vals = map[string]cty.Value{} + for k, gvs := range groupVals { + vals[k] = cty.TupleVal(gvs) + } + } + + return cty.ObjectVal(vals), diags + + } else { + // Producing a tuple + vals := []cty.Value{} + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !includeRaw.IsKnown() { + // We will eventually return DynamicVal, but we'll continue + // iterating in case there are other diagnostics to gather + // for later elements. + known = false + continue + } + + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + vals = append(vals, val) + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags + } +} + +func (e *ForExpr) walkChildNodes(w internalWalkFunc) { + e.CollExpr = w(e.CollExpr).(Expression) + + scopeNames := map[string]struct{}{} + if e.KeyVar != "" { + scopeNames[e.KeyVar] = struct{}{} + } + if e.ValVar != "" { + scopeNames[e.ValVar] = struct{}{} + } + + if e.KeyExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.KeyExpr, + }) + } + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.ValExpr, + }) + if e.CondExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.CondExpr, + }) + } +} + +func (e *ForExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ForExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type SplatExpr struct { + Source Expression + Each Expression + Item *AnonSymbolExpr + + SrcRange hcl.Range + MarkerRange hcl.Range +} + +func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + sourceVal, diags := e.Source.Value(ctx) + if diags.HasErrors() { + // We'll evaluate our "Each" expression here just to see if it + // produces any more diagnostics we can report. Since we're not + // assigning a value to our AnonSymbolExpr here it will return + // DynamicVal, which should short-circuit any use of it. 
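+		// (For a splat written illustratively as items[*].name, Source is
+		// the items expression and Each is the .name traversal applied to
+		// the Item placeholder for each element.)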
+ _, itemDiags := e.Item.Value(ctx) + diags = append(diags, itemDiags...) + return cty.DynamicVal, diags + } + + if sourceVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null values.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + }) + return cty.DynamicVal, diags + } + if !sourceVal.IsKnown() { + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-value list. We'll + // deal with that here first. + if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType()) { + sourceVal = cty.ListVal([]cty.Value{sourceVal}) + } + + vals := make([]cty.Value, 0, sourceVal.LengthInt()) + it := sourceVal.ElementIterator() + if ctx == nil { + // we need a context to use our AnonSymbolExpr, so we'll just + // make an empty one here to use as a placeholder. + ctx = ctx.NewChild() + } + isKnown := true + for it.Next() { + _, sourceItem := it.Element() + e.Item.setValue(ctx, sourceItem) + newItem, itemDiags := e.Each.Value(ctx) + diags = append(diags, itemDiags...) + if itemDiags.HasErrors() { + isKnown = false + } + vals = append(vals, newItem) + } + e.Item.clearValue(ctx) // clean up our temporary value + + if !isKnown { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags +} + +func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { + e.Source = w(e.Source).(Expression) + e.Each = w(e.Each).(Expression) +} + +func (e *SplatExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *SplatExpr) StartRange() hcl.Range { + return e.MarkerRange +} + +// AnonSymbolExpr is used as a placeholder for a value in an expression that +// can be applied dynamically to any value at runtime. +// +// This is a rather odd, synthetic expression. It is used as part of the +// representation of splat expressions as a placeholder for the current item +// being visited in the splat evaluation. +// +// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called +// directly then cty.DynamicVal will be returned. Instead, it is evaluated +// in terms of another node (i.e. a splat expression) which temporarily +// assigns it a value. +type AnonSymbolExpr struct { + SrcRange hcl.Range + values map[*hcl.EvalContext]cty.Value +} + +func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ctx == nil { + return cty.DynamicVal, nil + } + val, exists := e.values[ctx] + if !exists { + return cty.DynamicVal, nil + } + return val, nil +} + +// setValue sets a temporary local value for the expression when evaluated +// in the given context, which must be non-nil. 
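+//
+// As an illustrative sketch of the lifecycle (mirroring the loop in
+// SplatExpr.Value above):
+//
+//     e.Item.setValue(ctx, elem) // bind the placeholder for this element
+//     v, diags := e.Each.Value(ctx)
+//     e.Item.clearValue(ctx) // remove the temporary binding again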
+func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + if e.values == nil { + e.values = make(map[*hcl.EvalContext]cty.Value) + } + if ctx == nil { + panic("can't setValue for a nil EvalContext") + } + e.values[ctx] = val +} + +func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + if e.values == nil { + return + } + if ctx == nil { + panic("can't clearValue for a nil EvalContext") + } + delete(e.values, ctx) +} + +func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) { + // AnonSymbolExpr is a leaf node in the tree +} + +func (e *AnonSymbolExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *AnonSymbolExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go new file mode 100644 index 00000000000..9a5da043beb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go @@ -0,0 +1,258 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +type Operation struct { + Impl function.Function + Type cty.Type +} + +var ( + OpLogicalOr = &Operation{ + Impl: stdlib.OrFunc, + Type: cty.Bool, + } + OpLogicalAnd = &Operation{ + Impl: stdlib.AndFunc, + Type: cty.Bool, + } + OpLogicalNot = &Operation{ + Impl: stdlib.NotFunc, + Type: cty.Bool, + } + + OpEqual = &Operation{ + Impl: stdlib.EqualFunc, + Type: cty.Bool, + } + OpNotEqual = &Operation{ + Impl: stdlib.NotEqualFunc, + Type: cty.Bool, + } + + OpGreaterThan = &Operation{ + Impl: stdlib.GreaterThanFunc, + Type: cty.Bool, + } + OpGreaterThanOrEqual = &Operation{ + Impl: stdlib.GreaterThanOrEqualToFunc, + Type: cty.Bool, + } + OpLessThan = &Operation{ + Impl: stdlib.LessThanFunc, + Type: cty.Bool, + } + OpLessThanOrEqual = &Operation{ + Impl: stdlib.LessThanOrEqualToFunc, + Type: cty.Bool, + } + + OpAdd = &Operation{ + Impl: stdlib.AddFunc, + Type: cty.Number, + } + OpSubtract = &Operation{ + Impl: stdlib.SubtractFunc, + Type: cty.Number, + } + OpMultiply = &Operation{ + Impl: stdlib.MultiplyFunc, + Type: cty.Number, + } + OpDivide = &Operation{ + Impl: stdlib.DivideFunc, + Type: cty.Number, + } + OpModulo = &Operation{ + Impl: stdlib.ModuloFunc, + Type: cty.Number, + } + OpNegate = &Operation{ + Impl: stdlib.NegateFunc, + Type: cty.Number, + } +) + +var binaryOps []map[TokenType]*Operation + +func init() { + // This operation table maps from the operator's token type + // to the AST operation type. All expressions produced from + // binary operators are BinaryOp nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. 
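+	// For example (illustrative), because the multiplicative group is
+	// listed after the additive group, a + b * c parses as a + (b * c),
+	// and a || b && c parses as a || (b && c).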
+ binaryOps = []map[TokenType]*Operation{ + { + TokenOr: OpLogicalOr, + }, + { + TokenAnd: OpLogicalAnd, + }, + { + TokenEqualOp: OpEqual, + TokenNotEqual: OpNotEqual, + }, + { + TokenGreaterThan: OpGreaterThan, + TokenGreaterThanEq: OpGreaterThanOrEqual, + TokenLessThan: OpLessThan, + TokenLessThanEq: OpLessThanOrEqual, + }, + { + TokenPlus: OpAdd, + TokenMinus: OpSubtract, + }, + { + TokenStar: OpMultiply, + TokenSlash: OpDivide, + TokenPercent: OpModulo, + }, + } +} + +type BinaryOpExpr struct { + LHS Expression + Op *Operation + RHS Expression + + SrcRange hcl.Range +} + +func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { + e.LHS = w(e.LHS).(Expression) + e.RHS = w(e.RHS).(Expression) +} + +func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly two arguments + params := impl.Params() + lhsParam := params[0] + rhsParam := params[1] + + var diags hcl.Diagnostics + + givenLHSVal, lhsDiags := e.LHS.Value(ctx) + givenRHSVal, rhsDiags := e.RHS.Value(ctx) + diags = append(diags, lhsDiags...) + diags = append(diags, rhsDiags...) + + lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), + Subject: e.LHS.Range().Ptr(), + Context: &e.SrcRange, + }) + } + rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), + Subject: e.RHS.Range().Ptr(), + Context: &e.SrcRange, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{lhsVal, rhsVal} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *BinaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *BinaryOpExpr) StartRange() hcl.Range { + return e.LHS.StartRange() +} + +type UnaryOpExpr struct { + Op *Operation + Val Expression + + SrcRange hcl.Range + SymbolRange hcl.Range +} + +func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { + e.Val = w(e.Val).(Expression) +} + +func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly one argument + params := impl.Params() + param := params[0] + + givenVal, diags := e.Val.Value(ctx) + + val, err := convert.Convert(givenVal, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), + Subject: e.Val.Range().Ptr(), + Context: &e.SrcRange, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. 
+ return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{val} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *UnaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *UnaryOpExpr) StartRange() hcl.Range { + return e.SymbolRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go new file mode 100644 index 00000000000..a1c472754d2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go @@ -0,0 +1,192 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type TemplateExpr struct { + Parts []Expression + + SrcRange hcl.Range +} + +func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { + for i, part := range e.Parts { + e.Parts[i] = w(part).(Expression) + } +} + +func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + buf := &bytes.Buffer{} + var diags hcl.Diagnostics + isKnown := true + + for _, part := range e.Parts { + partVal, partDiags := part.Value(ctx) + diags = append(diags, partDiags...) + + if partVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "The expression result is null. Cannot include a null value in a string template.", + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + }) + continue + } + + if !partVal.IsKnown() { + // If any part is unknown then the result as a whole must be + // unknown too. We'll keep on processing the rest of the parts + // anyway, because we want to still emit any diagnostics resulting + // from evaluating those. + isKnown = false + continue + } + + strVal, err := convert.Convert(partVal, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include the given value in a string template: %s.", + err.Error(), + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + }) + continue + } + + buf.WriteString(strVal.AsString()) + } + + if !isKnown { + return cty.UnknownVal(cty.String), diags + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateExpr) StartRange() hcl.Range { + return e.Parts[0].StartRange() +} + +// TemplateJoinExpr is used to convert tuples of strings produced by template +// constructs (i.e. for loops) into flat strings, by converting the values +// tos strings and joining them. This AST node is not used directly; it's +// produced as part of the AST of a "for" loop in a template. +type TemplateJoinExpr struct { + Tuple Expression +} + +func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { + e.Tuple = w(e.Tuple).(Expression) +} + +func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + tuple, diags := e.Tuple.Value(ctx) + + if tuple.IsNull() { + // This indicates a bug in the code that constructed the AST. 
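+		// (The template parser is the only code that constructs
+		// TemplateJoinExpr, and it always supplies a tuple expression.)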
+ panic("TemplateJoinExpr got null tuple") + } + if tuple.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + if !tuple.Type().IsTupleType() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got non-tuple tuple") + } + if !tuple.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf := &bytes.Buffer{} + it := tuple.ElementIterator() + for it.Next() { + _, val := it.Element() + + if val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "An iteration result is null. Cannot include a null value in a string template.", + ), + Subject: e.Range().Ptr(), + }) + continue + } + if val.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + strVal, err := convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include one of the interpolation results into the string template: %s.", + err.Error(), + ), + Subject: e.Range().Ptr(), + }) + continue + } + if !val.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf.WriteString(strVal.AsString()) + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateJoinExpr) Range() hcl.Range { + return e.Tuple.Range() +} + +func (e *TemplateJoinExpr) StartRange() hcl.Range { + return e.Tuple.StartRange() +} + +// TemplateWrapExpr is used instead of a TemplateExpr when a template +// consists _only_ of a single interpolation sequence. In that case, the +// template's result is the single interpolation's result, verbatim with +// no type conversions. +type TemplateWrapExpr struct { + Wrapped Expression + + SrcRange hcl.Range +} + +func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { + e.Wrapped = w(e.Wrapped).(Expression) +} + +func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Wrapped.Value(ctx) +} + +func (e *TemplateWrapExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateWrapExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go new file mode 100755 index 00000000000..c15d1340506 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go @@ -0,0 +1,72 @@ +package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. 
+ +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func (e *AnonSymbolExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *BinaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ConditionalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ForExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *FunctionCallExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *IndexExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *LiteralValueExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *RelativeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ScopeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *SplatExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateJoinExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateWrapExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TupleConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *UnaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go new file mode 100644 index 00000000000..490c02556b2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go @@ -0,0 +1,20 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// File is the top-level object resulting from parsing a configuration file. 
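+//
+// Body is the root body of the file, and Bytes retains the raw source it
+// was parsed from, which is useful for rendering source snippets in
+// diagnostics.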
+type File struct { + Body *Body + Bytes []byte +} + +func (f *File) AsHCLFile() *hcl.File { + return &hcl.File{ + Body: f.Body, + Bytes: f.Bytes, + + // TODO: The Nav object, once we have an implementation of it + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go new file mode 100644 index 00000000000..ecc389f1144 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go @@ -0,0 +1,7 @@ +package hclsyntax + +//go:generate go run expression_vars_gen.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl +//go:generate ragel -Z scan_tokens.rl +//go:generate gofmt -w scan_tokens.go +//go:generate stringer -type TokenType -output token_type_string.go diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go new file mode 100644 index 00000000000..eef8b9626ce --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go @@ -0,0 +1,21 @@ +package hclsyntax + +import ( + "bytes" +) + +type Keyword []byte + +var forKeyword = Keyword([]byte{'f', 'o', 'r'}) +var inKeyword = Keyword([]byte{'i', 'n'}) +var ifKeyword = Keyword([]byte{'i', 'f'}) +var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'}) +var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'}) +var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'}) + +func (kw Keyword) TokenMatches(token Token) bool { + if token.Type != TokenIdent { + return false + } + return bytes.Equal([]byte(kw), token.Bytes) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go new file mode 100644 index 00000000000..8a04c2081a6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go @@ -0,0 +1,41 @@ +package hclsyntax + +import ( + "bytes" + "fmt" +) + +type navigation struct { + root *Body +} + +// Implementation of zcled.ContextString +func (n navigation) ContextString(offset int) string { + // We will walk our top-level blocks until we find one that contains + // the given offset, and then construct a representation of the header + // of the block. + + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return "" + } + + if len(block.Labels) == 0 { + // Easy case! + return block.Type + } + + buf := &bytes.Buffer{} + buf.WriteString(block.Type) + for _, label := range block.Labels { + fmt.Fprintf(buf, " %q", label) + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go new file mode 100644 index 00000000000..fd426d4a6c2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go @@ -0,0 +1,22 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Node is the abstract type that every AST node implements. +// +// This is a closed interface, so it cannot be implemented from outside of +// this package. +type Node interface { + // This is the mechanism by which the public-facing walk functions + // are implemented. Implementations should call the given function + // for each child node and then replace that node with its return value. 
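+	// This gives a walk the opportunity to rewrite the tree in place as
+	// it descends.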
+ // The return value might just be the same node, for non-transforming + // walks. + walkChildNodes(w internalWalkFunc) + + Range() hcl.Range +} + +type internalWalkFunc func(Node) Node diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go new file mode 100644 index 00000000000..0f81ddfb144 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go @@ -0,0 +1,1780 @@ +package hclsyntax + +import ( + "bufio" + "bytes" + "fmt" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type parser struct { + *peeker + + // set to true if any recovery is attempted. The parser can use this + // to attempt to reduce error noise by suppressing "bad token" errors + // in recovery mode, assuming that the recovery heuristics have failed + // in this case and left the peeker in a wrong place. + recovery bool +} + +func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) { + attrs := Attributes{} + blocks := Blocks{} + var diags hcl.Diagnostics + + startRange := p.PrevRange() + var endRange hcl.Range + +Token: + for { + next := p.Peek() + if next.Type == end { + endRange = p.NextRange() + p.Read() + break Token + } + + switch next.Type { + case TokenNewline: + p.Read() + continue + case TokenIdent: + item, itemDiags := p.ParseBodyItem() + diags = append(diags, itemDiags...) + switch titem := item.(type) { + case *Block: + blocks = append(blocks, titem) + case *Attribute: + if existing, exists := attrs[titem.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute redefined", + Detail: fmt.Sprintf( + "The attribute %q was already defined at %s. Each attribute may be defined only once.", + titem.Name, existing.NameRange.String(), + ), + Subject: &titem.NameRange, + }) + } else { + attrs[titem.Name] = titem + } + default: + // This should never happen for valid input, but may if a + // syntax error was detected in ParseBodyItem that prevented + // it from even producing a partially-broken item. In that + // case, it would've left at least one error in the diagnostics + // slice we already dealt with above. + // + // We'll assume ParseBodyItem attempted recovery to leave + // us in a reasonable position to try parsing the next item. 
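+				// Continuing here resumes scanning the body at the
+				// position where that recovery left us.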
+ continue + } + default: + bad := p.Read() + if !p.recovery { + if bad.Type == TokenOQuote { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "Attribute names must not be quoted.", + Subject: &bad.Range, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here.", + Subject: &bad.Range, + }) + } + } + endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics + + p.recover(end) // attempt to recover to the token after the end of this body + break Token + } + } + + return &Body{ + Attributes: attrs, + Blocks: blocks, + + SrcRange: hcl.RangeBetween(startRange, endRange), + EndRange: hcl.Range{ + Filename: endRange.Filename, + Start: endRange.End, + End: endRange.End, + }, + }, diags +} + +func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + next := p.Peek() + + switch next.Type { + case TokenEqual: + return p.finishParsingBodyAttribute(ident) + case TokenOQuote, TokenOBrace: + return p.finishParsingBodyBlock(ident) + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.", + Subject: &ident.Range, + }, + } + } + + return nil, nil +} + +func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) { + eqTok := p.Read() // eat equals token + if eqTok.Type != TokenEqual { + // should never happen if caller behaves + panic("finishParsingBodyAttribute called with next not equals") + } + + var endRange hcl.Range + + expr, diags := p.ParseExpression() + if p.recovery && diags.HasErrors() { + // recovery within expressions tends to be tricky, so we've probably + // landed somewhere weird. We'll try to reset to the start of a body + // item so parsing can continue. 
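+		// PrevRange is the last token the expression parser actually
+		// consumed, which is the best end marker available here.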
+ endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + end := p.Peek() + if end.Type != TokenNewline { + if !p.recovery { + if end.Type == TokenEOF { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after attribute definition", + Detail: "A newline is required after an attribute definition at the end of a file.", + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after attribute definition", + Detail: "An attribute definition must end with a newline.", + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } + } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline + } + } + + return &Attribute{ + Name: string(ident.Bytes), + Expr: expr, + + SrcRange: hcl.RangeBetween(ident.Range, endRange), + NameRange: ident.Range, + EqualsRange: eqTok.Range, + }, diags +} + +func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) { + var blockType = string(ident.Bytes) + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + + var oBrace Token + +Token: + for { + tok := p.Peek() + + switch tok.Type { + + case TokenOBrace: + oBrace = p.Read() + break Token + + case TokenOQuote: + label, labelRange, labelDiags := p.parseQuotedStringLiteral() + diags = append(diags, labelDiags...) + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + if labelDiags.HasErrors() { + p.recoverAfterBodyItem() + return &Block{ + Type: blockType, + Labels: labels, + Body: nil, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + + default: + switch tok.Type { + case TokenEqual: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + case TokenNewline: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + } + } + + p.recoverAfterBodyItem() + + return &Block{ + Type: blockType, + Labels: labels, + Body: nil, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + } + + // Once we fall out here, the peeker is pointed just after our opening + // brace, so we can begin our nested body parsing. + body, bodyDiags := p.ParseBody(TokenCBrace) + diags = append(diags, bodyDiags...) 
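+	// ParseBody just consumed the closing brace, so PrevRange here is
+	// normally the brace's own range.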
+ cBraceRange := p.PrevRange() + + eol := p.Peek() + if eol.Type == TokenNewline { + p.Read() // eat newline + } else { + if !p.recovery { + if eol.Type == TokenEOF { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A newline is required after a block definition at the end of a file.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } + } + p.recoverAfterBodyItem() + } + + return &Block{ + Type: blockType, + Labels: labels, + Body: body, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: oBrace.Range, + CloseBraceRange: cBraceRange, + }, diags +} + +func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) { + return p.parseTernaryConditional() +} + +func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) { + // The ternary conditional operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "symbol" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startRange := p.NextRange() + var condExpr, trueExpr, falseExpr Expression + var diags hcl.Diagnostics + + condExpr, condDiags := p.parseBinaryOps(binaryOps) + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + return condExpr, diags + } + + questionMark := p.Peek() + if questionMark.Type != TokenQuestion { + return condExpr, diags + } + + p.Read() // eat question mark + + trueExpr, trueDiags := p.ParseExpression() + diags = append(diags, trueDiags...) + if p.recovery && trueDiags.HasErrors() { + return condExpr, diags + } + + colon := p.Peek() + if colon.Type != TokenColon { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing false expression in conditional", + Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.", + Subject: &colon.Range, + Context: hcl.RangeBetween(startRange, colon.Range).Ptr(), + }) + return condExpr, diags + } + + p.Read() // eat colon + + falseExpr, falseDiags := p.ParseExpression() + diags = append(diags, falseDiags...) + if p.recovery && falseDiags.HasErrors() { + return condExpr, diags + } + + return &ConditionalExpr{ + Condition: condExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()), + }, diags +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls parseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. 
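+		// (A "term" is a primary expression, possibly followed by
+		// attribute, index, and splat traversals.)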
+ return p.parseExpressionWithTraversals() + } + + thisLevel := ops[0] + remaining := ops[1:] + + var lhs, rhs Expression + var operation *Operation + var diags hcl.Diagnostics + + // Parse a term that might be the first operand of a binary + // operation or it might just be a standalone term. + // We won't know until we've parsed it and can look ahead + // to see if there's an operator token for this level. + lhs, lhsDiags := p.parseBinaryOps(remaining) + diags = append(diags, lhsDiags...) + if p.recovery && lhsDiags.HasErrors() { + return lhs, diags + } + + // We'll keep eating up operators until we run out, so that operators + // with the same precedence will combine in a left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.Peek() + var newOp *Operation + var ok bool + if newOp, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on the previous iteration? + if operation != nil { + lhs = &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + } + } + + operation = newOp + p.Read() // eat operator token + var rhsDiags hcl.Diagnostics + rhs, rhsDiags = p.parseBinaryOps(remaining) + diags = append(diags, rhsDiags...) + if p.recovery && rhsDiags.HasErrors() { + return lhs, diags + } + } + + if operation == nil { + return lhs, diags + } + + return &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + }, diags +} + +func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { + term, diags := p.parseExpressionTerm() + ret := term + +Traversal: + for { + next := p.Peek() + + switch next.Type { + case TokenDot: + // Attribute access or splat + dot := p.Read() + attrTok := p.Peek() + + switch attrTok.Type { + case TokenIdent: + attrTok = p.Read() // eat token + name := string(attrTok.Bytes) + rng := hcl.RangeBetween(dot.Range, attrTok.Range) + step := hcl.TraverseAttr{ + Name: name, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenStar: + // "Attribute-only" splat expression. + // (This is a kinda weird construct inherited from HIL, which + // behaves a bit like a [*] splat except that it is only able + // to do attribute traversals into each of its elements, + // whereas foo[*] can support _any_ traversal. + marker := p.Read() // eat star + trav := make(hcl.Traversal, 0, 1) + var firstRange, lastRange hcl.Range + firstRange = p.NextRange() + for p.Peek().Type == TokenDot { + dot := p.Read() + + if p.Peek().Type == TokenNumberLit { + // Continuing the "weird stuff inherited from HIL" + // theme, we also allow numbers as attribute names + // inside splats and interpret them as indexing + // into a list, for expressions like: + // foo.bar.*.baz.0.foo + numTok := p.Read() + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
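+						// Recording a TraverseIndex step (rather than a
+						// TraverseAttr) makes this behave like list
+						// indexing during evaluation.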
+ trav = append(trav, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + if p.Peek().Type != TokenIdent { + if !p.recovery { + if p.Peek().Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nested splat expression not allowed", + Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.", + Subject: p.Peek().Range.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + } + } + p.setRecovery() + continue Traversal + } + + attrTok := p.Read() + trav = append(trav, hcl.TraverseAttr{ + Name: string(attrTok.Bytes), + SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range), + }) + lastRange = attrTok.Range + } + + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(dot.Range, marker.Range), + } + var travExpr Expression + if len(trav) == 0 { + travExpr = itemExpr + } else { + travExpr = &RelativeTraversalExpr{ + Source: itemExpr, + Traversal: trav, + SrcRange: hcl.RangeBetween(firstRange, lastRange), + } + } + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(dot.Range, lastRange), + MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + // This leaves the peeker in a bad place, so following items + // will probably be misparsed until we hit something that + // allows us to re-sync. + // + // We will probably need to do something better here eventually + // in order to support autocomplete triggered by typing a + // period. + p.setRecovery() + } + + case TokenOBrack: + // Indexing of a collection. + // This may or may not be a hcl.Traverser, depending on whether + // the key value is something constant. + + open := p.Read() + // TODO: If we have a TokenStar inside our brackets, parse as + // a Splat expression: foo[*].baz[0]. + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) + if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PushIncludeNewlines(true) + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := &hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } + } + + default: + break Traversal + } + } + + return ret, diags +} + +// makeRelativeTraversal takes an expression and a traverser and returns +// a traversal expression that combines the two. 
If the given expression +// is already a traversal, it is extended in place (mutating it) and +// returned. If it isn't, a new RelativeTraversalExpr is created and returned. +func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { + switch texpr := expr.(type) { + case *ScopeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + case *RelativeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + default: + return &RelativeTraversalExpr{ + Source: expr, + Traversal: hcl.Traversal{next}, + SrcRange: rng, + } + } +} + +func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { + start := p.Peek() + + switch start.Type { + case TokenOParen: + p.Read() // eat open paren + + p.PushIncludeNewlines(false) + + expr, diags := p.ParseExpression() + if diags.HasErrors() { + // attempt to place the peeker after our closing paren + // before we return, so that the next parser has some + // chance of finding a valid expression. + p.recover(TokenCParen) + p.PopIncludeNewlines() + return expr, diags + } + + close := p.Peek() + if close.Type != TokenCParen { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unbalanced parentheses", + Detail: "Expected a closing parenthesis to terminate the expression.", + Subject: &close.Range, + Context: hcl.RangeBetween(start.Range, close.Range).Ptr(), + }) + p.setRecovery() + } + + p.Read() // eat closing paren + p.PopIncludeNewlines() + + return expr, diags + + case TokenNumberLit: + tok := p.Read() // eat number token + + numVal, diags := p.numberLitValue(tok) + return &LiteralValueExpr{ + Val: numVal, + SrcRange: tok.Range, + }, diags + + case TokenIdent: + tok := p.Read() // eat identifier token + + if p.Peek().Type == TokenOParen { + return p.finishParsingFunctionCall(tok) + } + + name := string(tok.Bytes) + switch name { + case "true": + return &LiteralValueExpr{ + Val: cty.True, + SrcRange: tok.Range, + }, nil + case "false": + return &LiteralValueExpr{ + Val: cty.False, + SrcRange: tok.Range, + }, nil + case "null": + return &LiteralValueExpr{ + Val: cty.NullVal(cty.DynamicPseudoType), + SrcRange: tok.Range, + }, nil + default: + return &ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: name, + SrcRange: tok.Range, + }, + }, + SrcRange: tok.Range, + }, nil + } + + case TokenOQuote, TokenOHeredoc: + open := p.Read() // eat opening marker + closer := p.oppositeBracket(open.Type) + exprs, passthru, _, diags := p.parseTemplateInner(closer) + + closeRange := p.PrevRange() + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + + case TokenMinus: + tok := p.Read() // eat minus token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. 
-46+5 should parse as (-46)+5, not -(46+5) + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpNegate, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenBang: + tok := p.Read() // eat bang token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpLogicalNot, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenOBrack: + return p.parseTupleCons() + + case TokenOBrace: + return p.parseObjectCons() + + default: + var diags hcl.Diagnostics + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } + p.setRecovery() + + // Return a placeholder so that the AST is still structurally sound + // even in the presence of parse errors. + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: start.Range, + }, diags + } +} + +func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { + // We'll lean on the cty converter to do the conversion, to ensure that + // the behavior is the same as what would happen if converting a + // non-literal string to a number. + numStrVal := cty.StringVal(string(tok.Bytes)) + numVal, err := convert.Convert(numStrVal, cty.Number) + if err != nil { + ret := cty.UnknownVal(cty.Number) + return ret, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid number literal", + // FIXME: not a very good error message, but convert only + // gives us "a number is required", so not much help either. + Detail: "Failed to recognize the value of this number literal.", + Subject: &tok.Range, + }, + } + } + return numVal, nil +} + +// finishParsingFunctionCall parses a function call assuming that the function +// name was already read, and so the peeker should be pointing at the opening +// parenthesis after the name. +func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + openTok := p.Read() + if openTok.Type != TokenOParen { + // should never happen if callers behave + panic("finishParsingFunctionCall called with non-parenthesis as next token") + } + + var args []Expression + var diags hcl.Diagnostics + var expandFinal bool + var closeTok Token + + // Arbitrary newlines are allowed inside the function call parentheses. + p.PushIncludeNewlines(false) + +Token: + for { + tok := p.Peek() + + if tok.Type == TokenCParen { + closeTok = p.Read() // eat closing paren + break Token + } + + arg, argDiags := p.ParseExpression() + args = append(args, arg) + diags = append(diags, argDiags...) + if p.recovery && argDiags.HasErrors() { + // if there was a parse error in the argument then we've + // probably been left in a weird place in the token stream, + // so we'll bail out with a partial argument list. 
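+			// recover skips ahead to just past the matching closing
+			// paren, counting any nested parentheses along the way.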
+ p.recover(TokenCParen) + break Token + } + + sep := p.Read() + if sep.Type == TokenCParen { + closeTok = sep + break Token + } + + if sep.Type == TokenEllipsis { + expandFinal = true + + if p.Peek().Type != TokenCParen { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing closing parenthesis", + Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } + closeTok = p.recover(TokenCParen) + } else { + closeTok = p.Read() // eat closing paren + } + break Token + } + + if sep.Type != TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + closeTok = p.recover(TokenCParen) + break Token + } + + if p.Peek().Type == TokenCParen { + // A trailing comma after the last argument gets us in here. + closeTok = p.Read() // eat closing paren + break Token + } + + } + + p.PopIncludeNewlines() + + return &FunctionCallExpr{ + Name: string(name.Bytes), + Args: args, + + ExpandFinal: expandFinal, + + NameRange: name.Range, + OpenParenRange: openTok.Range, + CloseParenRange: closeTok.Range, + }, diags +} + +func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrack { + // Should never happen if callers are behaving + panic("parseTupleCons called without peeker pointing to open bracket") + } + + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var exprs []Expression + + for { + next := p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + expr, exprDiags := p.ParseExpression() + exprs = append(exprs, expr) + diags = append(diags, exprDiags...) + + if p.recovery && exprDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing bracket to allow parsing to continue. 
+ close = p.recover(TokenCBrack) + break + } + + next = p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrack) + break + } + + p.Read() // eat comma + + } + + return &TupleConsExpr{ + Exprs: exprs, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrace { + // Should never happen if callers are behaving + panic("parseObjectCons called without peeker pointing to open brace") + } + + p.PushIncludeNewlines(true) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var items []ObjectConsItem + + for { + next := p.Peek() + if next.Type == TokenNewline { + p.Read() // eat newline + continue + } + + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + // As a special case, we allow the key to be a literal identifier. + // This means that a variable reference or function call can't appear + // directly as key expression, and must instead be wrapped in some + // disambiguation punctuation, like (var.a) = "b" or "${var.a}" = "b". + var key Expression + var keyDiags hcl.Diagnostics + if p.Peek().Type == TokenIdent { + nameTok := p.Read() + key = &LiteralValueExpr{ + Val: cty.StringVal(string(nameTok.Bytes)), + + SrcRange: nameTok.Range, + } + } else { + key, keyDiags = p.ParseExpression() + } + + diags = append(diags, keyDiags...) + + if p.recovery && keyDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. + close = p.recover(TokenCBrace) + break + } + + next = p.Peek() + if next.Type != TokenEqual && next.Type != TokenColon { + if !p.recovery { + if next.Type == TokenNewline || next.Type == TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item value", + Detail: "Expected an item value, introduced by an equals sign (\"=\").", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing key/value separator", + Detail: "Expected an equals sign (\"=\") to mark the beginning of the item value.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat equals sign or colon + + value, valueDiags := p.ParseExpression() + diags = append(diags, valueDiags...) + + if p.recovery && valueDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. 
+ close = p.recover(TokenCBrace) + break + } + + items = append(items, ObjectConsItem{ + KeyExpr: key, + ValueExpr: value, + }) + + next = p.Peek() + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma && next.Type != TokenNewline { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a newline or comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat comma or newline + + } + + return &ObjectConsExpr{ + Items: items, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + introducer := p.Read() + if !forKeyword.TokenMatches(introducer) { + // Should never happen if callers are behaving + panic("finishParsingForExpr called without peeker pointing to 'for' identifier") + } + + var makeObj bool + var closeType TokenType + switch open.Type { + case TokenOBrace: + makeObj = true + closeType = TokenCBrace + case TokenOBrack: + makeObj = false // making a tuple + closeType = TokenCBrack + default: + // Should never happen if callers are behaving + panic("finishParsingForExpr called with invalid open token") + } + + var diags hcl.Diagnostics + var keyName, valName string + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
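+	// At this point we've parsed "for <names> in <collection>"; a colon
+	// and the result expression(s) must follow.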
+ if p.recovery && collDiags.HasErrors() { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + if p.Peek().Type != TokenColon { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires colon after collection expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat colon + + var keyExpr, valExpr Expression + var keyDiags, valDiags hcl.Diagnostics + valExpr, valDiags = p.ParseExpression() + if p.Peek().Type == TokenFatArrow { + // What we just parsed was actually keyExpr + p.Read() // eat the fat arrow + keyExpr, keyDiags = valExpr, valDiags + + valExpr, valDiags = p.ParseExpression() + } + diags = append(diags, keyDiags...) + diags = append(diags, valDiags...) + if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + group := false + var ellipsis Token + if p.Peek().Type == TokenEllipsis { + ellipsis = p.Read() + group = true + } + + var condExpr Expression + var condDiags hcl.Diagnostics + if ifKeyword.TokenMatches(p.Peek()) { + p.Read() // eat "if" + condExpr, condDiags = p.ParseExpression() + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + close := p.recover(p.oppositeBracket(open.Type)) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + } + + var close Token + if p.Peek().Type == closeType { + close = p.Read() + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Extra characters after the end of the 'for' expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close = p.recover(closeType) + } + + if !makeObj { + if keyExpr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is not valid when building a tuple.", + Subject: keyExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + if group { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Grouping ellipsis (...) 
cannot be used when building a tuple.", + Subject: &ellipsis.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } else { + if keyExpr == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is required when building an object.", + Subject: valExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } + + return &ForExpr{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + KeyExpr: keyExpr, + ValExpr: valExpr, + CondExpr: condExpr, + Group: group, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +// parseQuotedStringLiteral is a helper for parsing quoted strings that +// aren't allowed to contain any interpolations, such as block labels. +func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { + oQuote := p.Read() + if oQuote.Type != TokenOQuote { + return "", oQuote.Range, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "A quoted string is required here.", + Subject: &oQuote.Range, + }, + } + } + + var diags hcl.Diagnostics + ret := &bytes.Buffer{} + var cQuote Token + +Token: + for { + tok := p.Read() + switch tok.Type { + + case TokenCQuote: + cQuote = tok + break Token + + case TokenQuotedLit: + s, sDiags := p.decodeStringLit(tok) + diags = append(diags, sDiags...) + ret.WriteString(s) + + case TokenTemplateControl, TokenTemplateInterp: + which := "$" + if tok.Type == TokenTemplateControl { + which = "!" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: fmt.Sprintf( + "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", + which, which, which, + ), + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenTemplateSeqEnd) + + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated string literal", + Detail: "Unable to find the closing quote mark before the end of the file.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + break Token + + default: + // Should never happen, as long as the scanner is behaving itself + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "This item is not valid in a string literal.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenOQuote) + break Token + + } + + } + + return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags +} + +// decodeStringLit processes the given token, which must be either a +// TokenQuotedLit or a TokenStringLit, returning the string resulting from +// resolving any escape sequences. +// +// If any error diagnostics are returned, the returned string may be incomplete +// or otherwise invalid. 
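+//
+// For example, the quoted literal foo\nbar decodes to "foo", a newline,
+// and then "bar", while the sequence $${ decodes to a literal ${ rather
+// than starting a template interpolation.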
+func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) { + var quoted bool + switch tok.Type { + case TokenQuotedLit: + quoted = true + case TokenStringLit: + quoted = false + default: + panic("decodeQuotedLit can only be used with TokenStringLit and TokenQuotedLit tokens") + } + var diags hcl.Diagnostics + + ret := make([]byte, 0, len(tok.Bytes)) + var esc []byte + + sc := bufio.NewScanner(bytes.NewReader(tok.Bytes)) + sc.Split(textseg.ScanGraphemeClusters) + + pos := tok.Range.Start + newPos := pos +Character: + for sc.Scan() { + pos = newPos + ch := sc.Bytes() + + // Adjust position based on our new character. + // \r\n is considered to be a single character in text segmentation, + if (len(ch) == 1 && ch[0] == '\n') || (len(ch) == 2 && ch[1] == '\n') { + newPos.Line++ + newPos.Column = 0 + } else { + newPos.Column++ + } + newPos.Byte += len(ch) + + if len(esc) > 0 { + switch esc[0] { + case '\\': + if len(ch) == 1 { + switch ch[0] { + + // TODO: numeric character escapes with \uXXXX + + case 'n': + ret = append(ret, '\n') + esc = esc[:0] + continue Character + case 'r': + ret = append(ret, '\r') + esc = esc[:0] + continue Character + case 't': + ret = append(ret, '\t') + esc = esc[:0] + continue Character + case '"': + ret = append(ret, '"') + esc = esc[:0] + continue Character + case '\\': + ret = append(ret, '\\') + esc = esc[:0] + continue Character + } + } + + var detail string + switch { + case len(ch) == 1 && (ch[0] == '$' || ch[0] == '!'): + detail = fmt.Sprintf( + "The characters \"\\%s\" do not form a recognized escape sequence. To escape a \"%s{\" template sequence, use \"%s%s{\".", + ch, ch, ch, ch, + ) + default: + detail = fmt.Sprintf("The characters \"\\%s\" do not form a recognized escape sequence.", ch) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: detail, + Subject: &hcl.Range{ + Filename: tok.Range.Filename, + Start: hcl.Pos{ + Line: pos.Line, + Column: pos.Column - 1, // safe because we know the previous character must be a backslash + Byte: pos.Byte - 1, + }, + End: hcl.Pos{ + Line: pos.Line, + Column: pos.Column + 1, // safe because we know the previous character must be a backslash + Byte: pos.Byte + len(ch), + }, + }, + }) + ret = append(ret, ch...) + esc = esc[:0] + continue Character + + case '$', '!': + switch len(esc) { + case 1: + if len(ch) == 1 && ch[0] == esc[0] { + esc = append(esc, ch[0]) + continue Character + } + + // Any other character means this wasn't an escape sequence + // after all. + ret = append(ret, esc...) + ret = append(ret, ch...) + esc = esc[:0] + case 2: + if len(ch) == 1 && ch[0] == '{' { + // successful escape sequence + ret = append(ret, esc[0]) + } else { + // not an escape sequence, so just output literal + ret = append(ret, esc...) + } + ret = append(ret, ch...) + esc = esc[:0] + default: + // should never happen + panic("have invalid escape sequence >2 characters") + } + + } + } else { + if len(ch) == 1 { + switch ch[0] { + case '\\': + if quoted { // ignore backslashes in unquoted mode + esc = append(esc, '\\') + continue Character + } + case '$': + esc = append(esc, '$') + continue Character + case '!': + esc = append(esc, '!') + continue Character + } + } + ret = append(ret, ch...) + } + } + + return string(ret), diags +} + +// setRecovery turns on recovery mode without actually doing any recovery. 
+// This can be used when a parser knowingly leaves the peeker in a useless +// place and wants to suppress errors that might result from that decision. +func (p *parser) setRecovery() { + p.recovery = true +} + +// recover seeks forward in the token stream until it finds TokenType "end", +// then returns with the peeker pointed at the following token. +// +// If the given token type is a bracketer, this function will additionally +// count nested instances of the brackets to try to leave the peeker at +// the end of the _current_ instance of that bracketer, skipping over any +// nested instances. This is a best-effort operation and may have +// unpredictable results on input with bad bracketer nesting. +func (p *parser) recover(end TokenType) Token { + start := p.oppositeBracket(end) + p.recovery = true + + nest := 0 + for { + tok := p.Read() + ty := tok.Type + if end == TokenTemplateSeqEnd && ty == TokenTemplateControl { + // normalize so that our matching behavior can work, since + // TokenTemplateControl/TokenTemplateInterp are asymmetrical + // with TokenTemplateSeqEnd and thus we need to count both + // openers if that's the closer we're looking for. + ty = TokenTemplateInterp + } + + switch ty { + case start: + nest++ + case end: + if nest < 1 { + return tok + } + + nest-- + case TokenEOF: + return tok + } + } +} + +// recoverOver seeks forward in the token stream until it finds a block +// starting with TokenType "start", then finds the corresponding end token, +// leaving the peeker pointed at the token after that end token. +// +// The given token type _must_ be a bracketer. For example, if the given +// start token is TokenOBrace then the parser will be left at the _end_ of +// the next brace-delimited block encountered, or at EOF if no such block +// is found or it is unclosed. +func (p *parser) recoverOver(start TokenType) { + end := p.oppositeBracket(start) + + // find the opening bracket first +Token: + for { + tok := p.Read() + switch tok.Type { + case start, TokenEOF: + break Token + } + } + + // Now use our existing recover function to locate the _end_ of the + // container we've found. + p.recover(end) +} + +func (p *parser) recoverAfterBodyItem() { + p.recovery = true + var open []TokenType + +Token: + for { + tok := p.Read() + + switch tok.Type { + + case TokenNewline: + if len(open) == 0 { + break Token + } + + case TokenEOF: + break Token + + case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl: + open = append(open, tok.Type) + + case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc: + opener := p.oppositeBracket(tok.Type) + for len(open) > 0 && open[len(open)-1] != opener { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + case TokenTemplateSeqEnd: + for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + } + } +} + +// oppositeBracket finds the bracket that opposes the given bracketer, or +// NilToken if the given token isn't a bracketer. +// +// "Bracketer", for the sake of this function, is one end of a matching +// open/close set of tokens that establish a bracketing context. 
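+//
+// Note that the template tokens are asymmetrical: TokenTemplateInterp and
+// TokenTemplateControl both close with TokenTemplateSeqEnd.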
+func (p *parser) oppositeBracket(ty TokenType) TokenType {
+	switch ty {
+
+	case TokenOBrace:
+		return TokenCBrace
+	case TokenOBrack:
+		return TokenCBrack
+	case TokenOParen:
+		return TokenCParen
+	case TokenOQuote:
+		return TokenCQuote
+	case TokenOHeredoc:
+		return TokenCHeredoc
+
+	case TokenCBrace:
+		return TokenOBrace
+	case TokenCBrack:
+		return TokenOBrack
+	case TokenCParen:
+		return TokenOParen
+	case TokenCQuote:
+		return TokenOQuote
+	case TokenCHeredoc:
+		return TokenOHeredoc
+
+	case TokenTemplateControl:
+		return TokenTemplateSeqEnd
+	case TokenTemplateInterp:
+		return TokenTemplateSeqEnd
+	case TokenTemplateSeqEnd:
+		// This is ambiguous, but we return Interp here because that's
+		// what's assumed by the "recover" method.
+		return TokenTemplateInterp
+
+	default:
+		return TokenNil
+	}
+}
+
+func errPlaceholderExpr(rng hcl.Range) Expression {
+	return &LiteralValueExpr{
+		Val:      cty.DynamicVal,
+		SrcRange: rng,
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
new file mode 100644
index 00000000000..e04c8e0f366
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
@@ -0,0 +1,728 @@
+package hclsyntax
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
+	return p.parseTemplate(TokenEOF)
+}
+
+func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
+	exprs, passthru, rng, diags := p.parseTemplateInner(end)
+
+	if passthru {
+		if len(exprs) != 1 {
+			panic("passthru set with len(exprs) != 1")
+		}
+		return &TemplateWrapExpr{
+			Wrapped:  exprs[0],
+			SrcRange: rng,
+		}, diags
+	}
+
+	return &TemplateExpr{
+		Parts:    exprs,
+		SrcRange: rng,
+	}, diags
+}
+
+func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
+	parts, diags := p.parseTemplateParts(end)
+	tp := templateParser{
+		Tokens:   parts.Tokens,
+		SrcRange: parts.SrcRange,
+	}
+	exprs, exprsDiags := tp.parseRoot()
+	diags = append(diags, exprsDiags...)
+
+	passthru := false
+	if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token
+		if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp {
+			passthru = true
+		}
+	}
+
+	return exprs, passthru, parts.SrcRange, diags
+}
+
+type templateParser struct {
+	Tokens   []templateToken
+	SrcRange hcl.Range
+
+	pos int
+}
+
+func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) {
+	var exprs []Expression
+	var diags hcl.Diagnostics
+
+	for {
+		next := p.Peek()
+		if _, isEnd := next.(*templateEndToken); isEnd {
+			break
+		}
+
+		expr, exprDiags := p.parseExpr()
+		diags = append(diags, exprDiags...)
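+		// Each parsed part (a literal, an interpolation, or the result of
+		// a control directive) becomes one element of the template.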
+ exprs = append(exprs, expr) + } + + return exprs, diags +} + +func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) { + next := p.Peek() + switch tok := next.(type) { + + case *templateLiteralToken: + p.Read() // eat literal + return &LiteralValueExpr{ + Val: cty.StringVal(tok.Val), + SrcRange: tok.SrcRange, + }, nil + + case *templateInterpToken: + p.Read() // eat interp + return tok.Expr, nil + + case *templateIfToken: + return p.parseIf() + + case *templateForToken: + return p.parseFor() + + case *templateEndToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + // This is a particularly unhelpful diagnostic, so callers + // should attempt to pre-empt it and produce a more helpful + // diagnostic that is context-aware. + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + case *templateEndCtrlToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()), + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + default: + // should never happen, because above should be exhaustive + panic(fmt.Sprintf("unhandled template token type %T", next)) + } +} + +func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) { + open := p.Read() + openIf, isIf := open.(*templateIfToken) + if !isIf { + // should never happen if caller is behaving + panic("parseIf called with peeker not pointing at if token") + } + + var ifExprs, elseExprs []Expression + var diags hcl.Diagnostics + var endifRange hcl.Range + + currentExprs := &ifExprs +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The if directive at %s is missing its corresponding endif directive.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + if currentExprs == &ifExprs { + currentExprs = &elseExprs + continue Token + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: fmt.Sprintf( + "Already in the else clause for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + + case templateEndIf: + endifRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endif directive for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
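+		// Non-directive content accumulates into whichever clause we're
+		// currently collecting: the if body first, then the else body.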
+ *currentExprs = append(*currentExprs, expr) + } + + if len(ifExprs) == 0 { + ifExprs = append(ifExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openIf.SrcRange.Filename, + Start: openIf.SrcRange.End, + End: openIf.SrcRange.End, + }, + }) + } + if len(elseExprs) == 0 { + elseExprs = append(elseExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: endifRange.Filename, + Start: endifRange.Start, + End: endifRange.Start, + }, + }) + } + + trueExpr := &TemplateExpr{ + Parts: ifExprs, + SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()), + } + falseExpr := &TemplateExpr{ + Parts: elseExprs, + SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()), + } + + return &ConditionalExpr{ + Condition: openIf.CondExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange), + }, diags +} + +func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) { + open := p.Read() + openFor, isFor := open.(*templateForToken) + if !isFor { + // should never happen if caller is behaving + panic("parseFor called with peeker not pointing at for token") + } + + var contentExprs []Expression + var diags hcl.Diagnostics + var endforRange hcl.Range + +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The for directive at %s is missing its corresponding endfor directive.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: "An else clause is not expected for a for directive.", + Subject: &end.SrcRange, + }) + + case templateEndFor: + endforRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endfor directive corresponding to the for directive at %s.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
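+		// Everything between the for and endfor directives accumulates as
+		// the repeated body of the loop.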
+ contentExprs = append(contentExprs, expr) + } + + if len(contentExprs) == 0 { + contentExprs = append(contentExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openFor.SrcRange.Filename, + Start: openFor.SrcRange.End, + End: openFor.SrcRange.End, + }, + }) + } + + contentExpr := &TemplateExpr{ + Parts: contentExprs, + SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()), + } + + forExpr := &ForExpr{ + KeyVar: openFor.KeyVar, + ValVar: openFor.ValVar, + + CollExpr: openFor.CollExpr, + ValExpr: contentExpr, + + SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange), + OpenRange: openFor.SrcRange, + CloseRange: endforRange, + } + + return &TemplateJoinExpr{ + Tuple: forExpr, + }, diags +} + +func (p *templateParser) Peek() templateToken { + return p.Tokens[p.pos] +} + +func (p *templateParser) Read() templateToken { + ret := p.Peek() + if _, end := ret.(*templateEndToken); !end { + p.pos++ + } + return ret +} + +// parseTemplateParts produces a flat sequence of "template tokens", which are +// either literal values (with any "trimming" already applied), interpolation +// sequences, or control flow markers. +// +// A further pass is required on the result to turn it into an AST. +func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) { + var parts []templateToken + var diags hcl.Diagnostics + + startRange := p.NextRange() + ltrimNext := false + nextCanTrimPrev := false + var endRange hcl.Range + +Token: + for { + next := p.Read() + if next.Type == end { + // all done! + endRange = next.Range + break + } + + ltrim := ltrimNext + ltrimNext = false + canTrimPrev := nextCanTrimPrev + nextCanTrimPrev = false + + switch next.Type { + case TokenStringLit, TokenQuotedLit: + str, strDiags := p.decodeStringLit(next) + diags = append(diags, strDiags...) + + if ltrim { + str = strings.TrimLeftFunc(str, unicode.IsSpace) + } + + parts = append(parts, &templateLiteralToken{ + Val: str, + SrcRange: next.Range, + }) + nextCanTrimPrev = true + + case TokenTemplateInterp: + // if the opener is ${~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + + p.PushIncludeNewlines(false) + expr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. 
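+				// For example (illustrative only), in "${name~} !" the
+				// space before "!" is trimmed from the start of the
+				// following literal.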
+ if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + parts = append(parts, &templateInterpToken{ + Expr: expr, + SrcRange: hcl.RangeBetween(next.Range, close.Range), + }) + + case TokenTemplateControl: + // if the opener is !{~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + p.PushIncludeNewlines(false) + + kw := p.Peek() + if kw.Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template directive", + Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a !{ sequence.", + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat keyword token + + switch { + + case ifKeyword.TokenMatches(kw): + condExpr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + parts = append(parts, &templateIfToken{ + CondExpr: condExpr, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case elseKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateElse, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endifKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndIf, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case forKeyword.TokenMatches(kw): + var keyName, valName string + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
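+				// The complete directive header, e.g. !{for k, v in coll}
+				// (names here are illustrative), becomes a single for-token
+				// for the second parsing pass to consume.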
+ parts = append(parts, &templateForToken{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endforKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndFor, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + default: + if !p.recovery { + suggestions := []string{"if", "for", "else", "endif", "endfor"} + given := string(kw.Bytes) + suggestion := nameSuggestion(given, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template control keyword", + Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion), + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + + } + + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes), + Detail: "Expected a closing brace to end the sequence, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. + if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated template string", + Detail: "No closing marker was found for the string.", + Subject: &next.Range, + Context: hcl.RangeBetween(startRange, next.Range).Ptr(), + }) + } + final := p.recover(end) + endRange = final.Range + break Token + } + } + + if len(parts) == 0 { + // If a sequence has no content, we'll treat it as if it had an + // empty string in it because that's what the user probably means + // if they write "" in configuration. + parts = append(parts, &templateLiteralToken{ + Val: "", + SrcRange: hcl.Range{ + // Range is the zero-character span immediately after the + // opening quote. + Filename: startRange.Filename, + Start: startRange.End, + End: startRange.End, + }, + }) + } + + // Always end with an end token, so the parser can produce diagnostics + // about unclosed items with proper position information. + parts = append(parts, &templateEndToken{ + SrcRange: endRange, + }) + + ret := &templateParts{ + Tokens: parts, + SrcRange: hcl.RangeBetween(startRange, endRange), + } + + return ret, diags +} + +type templateParts struct { + Tokens []templateToken + SrcRange hcl.Range +} + +// templateToken is a higher-level token that represents a single atom within +// the template language. Our template parsing first raises the raw token +// stream to a sequence of templateToken, and then transforms the result into +// an expression tree. 
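+//
+// For example (illustrative only), the template "a${b}c" is raised to the
+// sequence: templateLiteralToken "a", templateInterpToken wrapping the
+// expression b, templateLiteralToken "c", and a trailing templateEndToken.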
+type templateToken interface { + templateToken() templateToken +} + +type templateLiteralToken struct { + Val string + SrcRange hcl.Range + isTemplateToken +} + +type templateInterpToken struct { + Expr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateIfToken struct { + CondExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateForToken struct { + KeyVar string // empty if ignoring key + ValVar string + CollExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateEndCtrlType int + +const ( + templateEndIf templateEndCtrlType = iota + templateElse + templateEndFor +) + +type templateEndCtrlToken struct { + Type templateEndCtrlType + SrcRange hcl.Range + isTemplateToken +} + +func (t *templateEndCtrlToken) Name() string { + switch t.Type { + case templateEndIf: + return "endif" + case templateElse: + return "else" + case templateEndFor: + return "endfor" + default: + // should never happen + panic("invalid templateEndCtrlType") + } +} + +type templateEndToken struct { + SrcRange hcl.Range + isTemplateToken +} + +type isTemplateToken [0]int + +func (t isTemplateToken) templateToken() templateToken { + return t +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go new file mode 100644 index 00000000000..2ff3ed6c1a1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go @@ -0,0 +1,159 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// ParseTraversalAbs parses an absolute traversal that is assumed to consume +// all of the remaining tokens in the peeker. The usual parser recovery +// behavior is not supported here because traversals are not expected to +// be parsed as part of a larger program. 
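+//
+// For example (illustrative only), the input foo.bar[0] produces a traversal
+// of hcl.TraverseRoot{Name: "foo"}, hcl.TraverseAttr{Name: "bar"}, and an
+// hcl.TraverseIndex whose key is the number 0.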
+func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + var ret hcl.Traversal + var diags hcl.Diagnostics + + // Absolute traversal must always begin with a variable name + varTok := p.Read() + if varTok.Type != TokenIdent { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable name required", + Detail: "Must begin with a variable name.", + Subject: &varTok.Range, + }) + return ret, diags + } + + varName := string(varTok.Bytes) + ret = append(ret, hcl.TraverseRoot{ + Name: varName, + SrcRange: varTok.Range, + }) + + for { + next := p.Peek() + + if next.Type == TokenEOF { + return ret, diags + } + + switch next.Type { + case TokenDot: + // Attribute access + dot := p.Read() // eat dot + nameTok := p.Read() + if nameTok.Type != TokenIdent { + if nameTok.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions (.*) may not be used here.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Dot must be followed by attribute name.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } + return ret, diags + } + + attrName := string(nameTok.Bytes) + ret = append(ret, hcl.TraverseAttr{ + Name: attrName, + SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range), + }) + case TokenOBrack: + // Index + open := p.Read() // eat open bracket + next := p.Peek() + + switch next.Type { + case TokenNumberLit: + tok := p.Read() // eat number + numVal, numDiags := p.numberLitValue(tok) + diags = append(diags, numDiags...) + + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + case TokenOQuote: + str, _, strDiags := p.parseQuotedStringLiteral() + diags = append(diags, strDiags...) 
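+				// A quoted key such as the one in foo["bar"] (illustrative)
+				// becomes a cty.StringVal index key below.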
+ + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: cty.StringVal(str), + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + default: + if next.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions ([*]) may not be used here.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Index value required", + Detail: "Index brackets must contain either a literal number or a literal string.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } + return ret, diags + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "Expected an attribute access or an index operator.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + return ret, diags + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go new file mode 100644 index 00000000000..b8171ffab21 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go @@ -0,0 +1,108 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type peeker struct { + Tokens Tokens + NextIndex int + + IncludeComments bool + IncludeNewlinesStack []bool +} + +func newPeeker(tokens Tokens, includeComments bool) *peeker { + return &peeker{ + Tokens: tokens, + IncludeComments: includeComments, + + IncludeNewlinesStack: []bool{true}, + } +} + +func (p *peeker) Peek() Token { + ret, _ := p.nextToken() + return ret +} + +func (p *peeker) Read() Token { + ret, nextIdx := p.nextToken() + p.NextIndex = nextIdx + return ret +} + +func (p *peeker) NextRange() hcl.Range { + return p.Peek().Range +} + +func (p *peeker) PrevRange() hcl.Range { + if p.NextIndex == 0 { + return p.NextRange() + } + + return p.Tokens[p.NextIndex-1].Range +} + +func (p *peeker) nextToken() (Token, int) { + for i := p.NextIndex; i < len(p.Tokens); i++ { + tok := p.Tokens[i] + switch tok.Type { + case TokenComment: + if !p.IncludeComments { + // Single-line comment tokens, starting with # or //, absorb + // the trailing newline that terminates them as part of their + // bytes. When we're filtering out comments, we must as a + // special case transform these to newline tokens in order + // to properly parse newline-terminated block items. + + if p.includingNewlines() { + if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { + fakeNewline := Token{ + Type: TokenNewline, + Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)], + + // We use the whole token range as the newline + // range, even though that's a little... weird, + // because otherwise we'd need to go count + // characters again in order to figure out the + // column of the newline, and that complexity + // isn't justified when ranges of newlines are + // so rarely printed anyway. 
+							Range: tok.Range,
+						}
+						return fakeNewline, i + 1
+					}
+				}
+
+				continue
+			}
+		case TokenNewline:
+			if !p.includingNewlines() {
+				continue
+			}
+		}
+
+		return tok, i + 1
+	}
+
+	// if we fall out here then we'll return the EOF token, and leave
+	// our index pointed off the end of the array so we'll keep
+	// returning EOF in future too.
+	return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
+}
+
+func (p *peeker) includingNewlines() bool {
+	return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
+}
+
+func (p *peeker) PushIncludeNewlines(include bool) {
+	p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
+}
+
+func (p *peeker) PopIncludeNewlines() bool {
+	stack := p.IncludeNewlinesStack
+	remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
+	p.IncludeNewlinesStack = remain
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
new file mode 100644
index 00000000000..49d8ab182b7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
@@ -0,0 +1,130 @@
+package hclsyntax
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// ParseConfig parses the given buffer as a whole zcl config file, returning
+// a *hcl.File representing its contents. If HasErrors called on the returned
+// diagnostics returns true, the returned body is likely to be incomplete
+// and should therefore be used with care.
+//
+// The body in the returned file has dynamic type *hclsyntax.Body, so callers
+// may freely type-assert this to get access to the full hclsyntax API in
+// situations where detailed access is required. However, most common use-cases
+// should be served using the hcl.Body interface to ensure compatibility with
+// other configuration syntaxes, such as JSON.
+func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
+	tokens, diags := LexConfig(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+	body, parseDiags := parser.ParseBody(TokenEOF)
+	diags = append(diags, parseDiags...)
+	return &hcl.File{
+		Body:  body,
+		Bytes: src,
+
+		Nav: navigation{
+			root: body,
+		},
+	}, diags
+}
+
+// ParseExpression parses the given buffer as a standalone zcl expression,
+// returning it as an instance of Expression.
+func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+	tokens, diags := LexExpression(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+
+	// Bare expressions are always parsed in "ignore newlines" mode, as if
+	// they were wrapped in parentheses.
+	parser.PushIncludeNewlines(false)
+
+	expr, parseDiags := parser.ParseExpression()
+	diags = append(diags, parseDiags...)
+
+	next := parser.Peek()
+	if next.Type != TokenEOF && !parser.recovery {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Extra characters after expression",
+			Detail:   "An expression was successfully parsed, but extra characters were found after it.",
+			Subject:  &next.Range,
+		})
+	}
+
+	return expr, diags
+}
+
+// ParseTemplate parses the given buffer as a standalone zcl template,
+// returning it as an instance of Expression.
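+//
+// A minimal usage sketch (the template text and filename here are
+// hypothetical, for illustration only):
+//
+//	expr, diags := ParseTemplate([]byte("Hello, ${name}!"), "greeting.tmpl", hcl.Pos{Line: 1, Column: 1, Byte: 0})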
+func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+	tokens, diags := LexTemplate(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+	expr, parseDiags := parser.ParseTemplate()
+	diags = append(diags, parseDiags...)
+	return expr, diags
+}
+
+// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
+//
+// Parsing as a traversal is more limited than parsing as an expression since
+// it allows only attribute and indexing operations on variables. Traversals
+// are useful as a syntax for referring to objects without necessarily
+// evaluating them.
+func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
+	tokens, diags := LexExpression(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+
+	// Bare traversals are always parsed in "ignore newlines" mode, as if
+	// they were wrapped in parentheses.
+	parser.PushIncludeNewlines(false)
+
+	expr, parseDiags := parser.ParseTraversalAbs()
+	diags = append(diags, parseDiags...)
+	return expr, diags
+}
+
+// LexConfig performs lexical analysis on the given buffer, treating it as a
+// whole zcl config file, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+	tokens := scanTokens(src, filename, start, scanNormal)
+	diags := checkInvalidTokens(tokens)
+	return tokens, diags
+}
+
+// LexExpression performs lexical analysis on the given buffer, treating it as
+// a standalone zcl expression, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+	// This is actually just the same thing as LexConfig, since configs
+	// and expressions lex in the same way.
+	tokens := scanTokens(src, filename, start, scanNormal)
+	diags := checkInvalidTokens(tokens)
+	return tokens, diags
+}
+
+// LexTemplate performs lexical analysis on the given buffer, treating it as a
+// standalone zcl template, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
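+//
+// For example (an illustration, not a normative token listing), lexing the
+// template "a${b}" yields a TokenStringLit for "a", then TokenTemplateInterp,
+// TokenIdent, TokenTemplateSeqEnd, and finally TokenEOF.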
+func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanTemplate) + diags := checkInvalidTokens(tokens) + return tokens, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go new file mode 100644 index 00000000000..a8ab57c3e52 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go @@ -0,0 +1,3779 @@ +// line 1 "scan_tokens.rl" +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl2/hcl" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. + +// line 14 "scan_tokens.go" +var _zcltok_actions []byte = []byte{ + 0, 1, 0, 1, 2, 1, 3, 1, 4, + 1, 5, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 12, + 1, 13, 1, 14, 1, 15, 1, 18, + 1, 19, 1, 20, 1, 21, 1, 22, + 1, 23, 1, 24, 1, 25, 1, 26, + 1, 27, 1, 30, 1, 31, 1, 32, + 1, 33, 1, 34, 1, 35, 1, 36, + 1, 37, 1, 38, 1, 39, 1, 45, + 1, 46, 1, 47, 1, 48, 1, 49, + 1, 50, 1, 51, 1, 52, 1, 53, + 1, 54, 1, 55, 1, 56, 1, 57, + 1, 58, 1, 59, 1, 60, 1, 61, + 1, 62, 1, 63, 1, 64, 1, 65, + 1, 66, 1, 67, 1, 68, 1, 69, + 1, 70, 1, 71, 1, 72, 1, 73, + 1, 74, 1, 75, 2, 0, 1, 2, + 3, 16, 2, 3, 17, 2, 3, 28, + 2, 3, 29, 2, 3, 40, 2, 3, + 41, 2, 3, 42, 2, 3, 43, 2, + 3, 44, +} + +var _zcltok_key_offsets []int16 = []int16{ + 0, 0, 1, 2, 3, 5, 10, 14, + 16, 57, 97, 143, 144, 148, 154, 154, + 156, 158, 167, 173, 180, 181, 184, 185, + 189, 194, 203, 207, 211, 219, 221, 223, + 225, 228, 260, 262, 264, 268, 272, 275, + 286, 299, 318, 331, 347, 359, 375, 390, + 411, 421, 433, 444, 458, 473, 483, 495, + 504, 516, 518, 522, 543, 552, 562, 568, + 574, 575, 624, 626, 630, 632, 638, 645, + 653, 660, 663, 669, 673, 677, 679, 683, + 687, 691, 697, 705, 713, 719, 721, 725, + 727, 733, 737, 741, 745, 749, 754, 761, + 767, 769, 771, 775, 777, 783, 787, 791, + 801, 806, 820, 835, 837, 845, 847, 852, + 866, 871, 873, 877, 878, 882, 888, 894, + 904, 914, 925, 933, 936, 939, 943, 947, + 949, 952, 952, 955, 957, 987, 989, 991, + 995, 1000, 1004, 1009, 1011, 1013, 1015, 1024, + 1028, 1032, 1038, 1040, 1048, 1056, 1068, 1071, + 1077, 1081, 1083, 1087, 1107, 1109, 1111, 1122, + 1128, 1130, 1132, 1134, 1138, 1144, 1150, 1152, + 1157, 1161, 1163, 1171, 1189, 1229, 1239, 1243, + 1245, 1247, 1248, 1252, 1256, 1260, 1264, 1268, + 1273, 1277, 1281, 1285, 1287, 1289, 1293, 1303, + 1307, 1309, 1313, 1317, 1321, 1334, 1336, 1338, + 1342, 1344, 1348, 1350, 1352, 1382, 1386, 1390, + 1394, 1397, 1404, 1409, 1420, 1424, 1440, 1454, + 1458, 1463, 1467, 1471, 1477, 1479, 1485, 1487, + 1491, 1493, 1499, 1504, 1509, 1519, 1521, 1523, + 1527, 1531, 1533, 1546, 1548, 1552, 1556, 1564, + 1566, 1570, 1572, 1573, 1576, 1581, 1583, 1585, + 1589, 1591, 1595, 1601, 1621, 1627, 1633, 1635, + 1636, 1646, 1647, 1655, 1662, 1664, 1667, 1669, + 1671, 1673, 1678, 1682, 1686, 1691, 1701, 1711, + 1715, 1719, 1733, 1759, 1769, 1771, 1773, 1776, + 1778, 1781, 1783, 1787, 1789, 1790, 1794, 1796, + 1799, 1806, 1814, 1816, 1818, 1822, 1824, 1830, + 1841, 1844, 1846, 1850, 1855, 1885, 1890, 1892, + 1895, 1900, 1914, 1921, 1935, 1940, 1953, 1957, + 1970, 1975, 1993, 1994, 2003, 2007, 2019, 2024, + 2031, 2038, 2045, 2047, 2051, 2073, 2078, 2079, + 2083, 2085, 2135, 2138, 2149, 2153, 2155, 2161, + 2167, 2169, 2174, 2176, 2180, 2182, 2183, 2185, + 2187, 2193, 2195, 2197, 2201, 2207, 2220, 2222, + 2228, 2232, 2240, 2251, 2259, 2262, 2292, 2298, + 2301, 2306, 2308, 2312, 2316, 2320, 2322, 2329, + 2331, 2340, 
2347, 2355, 2357, 2377, 2389, 2393, + 2395, 2413, 2452, 2454, 2458, 2460, 2467, 2471, + 2499, 2501, 2503, 2505, 2507, 2510, 2512, 2516, + 2520, 2522, 2525, 2527, 2529, 2532, 2534, 2536, + 2537, 2539, 2541, 2545, 2549, 2552, 2565, 2567, + 2573, 2577, 2579, 2583, 2587, 2601, 2604, 2613, + 2615, 2619, 2625, 2625, 2627, 2629, 2638, 2644, + 2651, 2652, 2655, 2656, 2660, 2665, 2674, 2678, + 2682, 2690, 2692, 2694, 2696, 2699, 2731, 2733, + 2735, 2739, 2743, 2746, 2757, 2770, 2789, 2802, + 2818, 2830, 2846, 2861, 2882, 2892, 2904, 2915, + 2929, 2944, 2954, 2966, 2975, 2987, 2989, 2993, + 3014, 3023, 3033, 3039, 3045, 3046, 3095, 3097, + 3101, 3103, 3109, 3116, 3124, 3131, 3134, 3140, + 3144, 3148, 3150, 3154, 3158, 3162, 3168, 3176, + 3184, 3190, 3192, 3196, 3198, 3204, 3208, 3212, + 3216, 3220, 3225, 3232, 3238, 3240, 3242, 3246, + 3248, 3254, 3258, 3262, 3272, 3277, 3291, 3306, + 3308, 3316, 3318, 3323, 3337, 3342, 3344, 3348, + 3349, 3353, 3359, 3365, 3375, 3385, 3396, 3404, + 3407, 3410, 3414, 3418, 3420, 3423, 3423, 3426, + 3428, 3458, 3460, 3462, 3466, 3471, 3475, 3480, + 3482, 3484, 3486, 3495, 3499, 3503, 3509, 3511, + 3519, 3527, 3539, 3542, 3548, 3552, 3554, 3558, + 3578, 3580, 3582, 3593, 3599, 3601, 3603, 3605, + 3609, 3615, 3621, 3623, 3628, 3632, 3634, 3642, + 3660, 3700, 3710, 3714, 3716, 3718, 3719, 3723, + 3727, 3731, 3735, 3739, 3744, 3748, 3752, 3756, + 3758, 3760, 3764, 3774, 3778, 3780, 3784, 3788, + 3792, 3805, 3807, 3809, 3813, 3815, 3819, 3821, + 3823, 3853, 3857, 3861, 3865, 3868, 3875, 3880, + 3891, 3895, 3911, 3925, 3929, 3934, 3938, 3942, + 3948, 3950, 3956, 3958, 3962, 3964, 3970, 3975, + 3980, 3990, 3992, 3994, 3998, 4002, 4004, 4017, + 4019, 4023, 4027, 4035, 4037, 4041, 4043, 4044, + 4047, 4052, 4054, 4056, 4060, 4062, 4066, 4072, + 4092, 4098, 4104, 4106, 4107, 4117, 4118, 4126, + 4133, 4135, 4138, 4140, 4142, 4144, 4149, 4153, + 4157, 4162, 4172, 4182, 4186, 4190, 4204, 4230, + 4240, 4242, 4244, 4247, 4249, 4252, 4254, 4258, + 4260, 4261, 4265, 4267, 4269, 4276, 4280, 4287, + 4294, 4303, 4319, 4331, 4349, 4360, 4372, 4380, + 4398, 4406, 4436, 4439, 4449, 4459, 4471, 4482, + 4491, 4504, 4516, 4520, 4526, 4553, 4562, 4565, + 4570, 4576, 4581, 4602, 4606, 4612, 4612, 4619, + 4628, 4636, 4639, 4643, 4649, 4655, 4658, 4662, + 4669, 4675, 4684, 4693, 4697, 4701, 4705, 4709, + 4716, 4720, 4724, 4734, 4740, 4744, 4750, 4754, + 4757, 4763, 4769, 4781, 4785, 4789, 4799, 4803, + 4814, 4816, 4818, 4822, 4834, 4839, 4863, 4867, + 4873, 4895, 4904, 4908, 4911, 4912, 4920, 4928, + 4934, 4944, 4951, 4969, 4972, 4975, 4983, 4989, + 4993, 4997, 5001, 5007, 5015, 5020, 5026, 5030, + 5038, 5045, 5049, 5056, 5062, 5070, 5078, 5084, + 5090, 5101, 5105, 5117, 5126, 5143, 5160, 5163, + 5167, 5169, 5175, 5177, 5181, 5196, 5200, 5204, + 5208, 5212, 5216, 5218, 5224, 5229, 5233, 5239, + 5246, 5249, 5267, 5269, 5314, 5320, 5326, 5330, + 5334, 5340, 5344, 5350, 5356, 5363, 5365, 5371, + 5377, 5381, 5385, 5393, 5406, 5412, 5419, 5427, + 5433, 5442, 5448, 5452, 5457, 5461, 5469, 5473, + 5477, 5507, 5513, 5519, 5525, 5531, 5538, 5544, + 5551, 5556, 5566, 5570, 5577, 5583, 5587, 5594, + 5598, 5604, 5607, 5611, 5615, 5619, 5623, 5628, + 5633, 5637, 5648, 5652, 5656, 5662, 5670, 5674, + 5691, 5695, 5701, 5711, 5717, 5723, 5726, 5731, + 5740, 5744, 5748, 5754, 5758, 5764, 5772, 5790, + 5791, 5801, 5802, 5811, 5819, 5821, 5824, 5826, + 5828, 5830, 5835, 5848, 5852, 5867, 5896, 5907, + 5909, 5913, 5917, 5922, 5926, 5928, 5935, 5939, + 5947, 5951, 5952, 5954, 5956, 5958, 5960, 5962, + 5963, 5964, 
5966, 5968, 5970, 5971, 5972, 5973, + 5974, 5976, 5978, 5980, 5981, 5982, 6057, 6058, + 6059, 6060, 6061, 6062, 6063, 6064, 6066, 6067, + 6072, 6074, 6076, 6077, 6121, 6122, 6123, 6125, + 6130, 6134, 6134, 6136, 6138, 6149, 6159, 6167, + 6168, 6170, 6171, 6175, 6179, 6189, 6193, 6200, + 6211, 6218, 6222, 6228, 6239, 6271, 6320, 6335, + 6350, 6355, 6357, 6362, 6394, 6402, 6404, 6426, + 6448, 6450, 6466, 6482, 6497, 6506, 6520, 6534, + 6550, 6551, 6552, 6553, 6554, 6556, 6558, 6560, + 6574, 6588, 6589, 6590, 6592, 6594, 6596, 6610, + 6624, 6625, 6626, 6628, 6630, +} + +var _zcltok_trans_keys []byte = []byte{ + 10, 46, 42, 42, 47, 46, 69, 101, + 48, 57, 43, 45, 48, 57, 48, 57, + 45, 194, 195, 198, 199, 203, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 239, 240, 65, + 90, 97, 122, 196, 202, 208, 218, 229, + 236, 194, 195, 198, 199, 203, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 239, 240, 65, + 90, 97, 122, 196, 202, 208, 218, 229, + 236, 10, 13, 45, 95, 194, 195, 198, + 199, 203, 204, 205, 206, 207, 210, 212, + 213, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 233, + 234, 237, 239, 240, 243, 48, 57, 65, + 90, 97, 122, 196, 218, 229, 236, 10, + 170, 181, 183, 186, 128, 150, 152, 182, + 184, 255, 192, 255, 0, 127, 173, 130, + 133, 146, 159, 165, 171, 175, 255, 181, + 190, 184, 185, 192, 255, 140, 134, 138, + 142, 161, 163, 255, 182, 130, 136, 137, + 176, 151, 152, 154, 160, 190, 136, 144, + 192, 255, 135, 129, 130, 132, 133, 144, + 170, 176, 178, 144, 154, 160, 191, 128, + 169, 174, 255, 148, 169, 157, 158, 189, + 190, 192, 255, 144, 255, 139, 140, 178, + 255, 186, 128, 181, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 128, 173, 128, 155, + 160, 180, 182, 189, 148, 161, 163, 255, + 176, 164, 165, 132, 169, 177, 141, 142, + 145, 146, 179, 181, 186, 187, 158, 133, + 134, 137, 138, 143, 150, 152, 155, 164, + 165, 178, 255, 188, 129, 131, 133, 138, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 182, 184, 185, 190, 255, 157, 131, + 134, 137, 138, 142, 144, 146, 152, 159, + 165, 182, 255, 129, 131, 133, 141, 143, + 145, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 255, 134, 138, 142, 143, 145, + 159, 164, 165, 176, 184, 186, 255, 129, + 131, 133, 140, 143, 144, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 191, 177, + 128, 132, 135, 136, 139, 141, 150, 151, + 156, 157, 159, 163, 166, 175, 156, 130, + 131, 133, 138, 142, 144, 146, 149, 153, + 154, 158, 159, 163, 164, 168, 170, 174, + 185, 190, 191, 144, 151, 128, 130, 134, + 136, 138, 141, 166, 175, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 185, 189, + 255, 133, 137, 151, 142, 148, 155, 159, + 164, 165, 176, 255, 128, 131, 133, 140, + 142, 144, 146, 168, 170, 179, 181, 185, + 188, 191, 158, 128, 132, 134, 136, 138, + 141, 149, 150, 160, 163, 166, 175, 177, + 178, 129, 131, 133, 140, 142, 144, 146, + 186, 189, 255, 133, 137, 143, 147, 152, + 158, 164, 165, 176, 185, 192, 255, 189, + 130, 131, 133, 150, 154, 177, 179, 187, + 138, 150, 128, 134, 143, 148, 152, 159, + 166, 175, 178, 179, 129, 186, 128, 142, + 144, 153, 132, 138, 141, 165, 167, 129, + 130, 135, 136, 148, 151, 153, 159, 161, + 163, 170, 171, 173, 185, 187, 189, 134, + 128, 132, 136, 141, 144, 153, 156, 159, + 128, 181, 183, 185, 152, 153, 160, 169, + 190, 191, 128, 135, 137, 172, 
177, 191, + 128, 132, 134, 151, 153, 188, 134, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 173, 175, 176, + 177, 178, 179, 181, 182, 183, 188, 189, + 190, 191, 132, 152, 172, 184, 185, 187, + 128, 191, 128, 137, 144, 255, 158, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 142, + 143, 182, 183, 191, 255, 128, 130, 133, + 136, 150, 152, 255, 145, 150, 151, 155, + 156, 160, 168, 178, 255, 128, 143, 160, + 255, 182, 183, 190, 255, 129, 255, 173, + 174, 192, 255, 129, 154, 160, 255, 171, + 173, 185, 255, 128, 140, 142, 148, 160, + 180, 128, 147, 160, 172, 174, 176, 178, + 179, 148, 150, 152, 155, 158, 159, 170, + 255, 139, 141, 144, 153, 160, 255, 184, + 255, 128, 170, 176, 255, 182, 255, 128, + 158, 160, 171, 176, 187, 134, 173, 176, + 180, 128, 171, 176, 255, 138, 143, 155, + 255, 128, 155, 160, 255, 159, 189, 190, + 192, 255, 167, 128, 137, 144, 153, 176, + 189, 140, 143, 154, 170, 180, 255, 180, + 255, 128, 183, 128, 137, 141, 189, 128, + 136, 144, 146, 148, 182, 184, 185, 128, + 181, 187, 191, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 255, 190, 128, 180, 182, 188, 130, 132, + 134, 140, 144, 147, 150, 155, 160, 172, + 178, 180, 182, 188, 128, 129, 130, 131, + 132, 133, 134, 176, 177, 178, 179, 180, + 181, 182, 183, 191, 255, 129, 147, 149, + 176, 178, 190, 192, 255, 144, 156, 161, + 144, 156, 165, 176, 130, 135, 149, 164, + 166, 168, 138, 147, 152, 157, 170, 185, + 188, 191, 142, 133, 137, 160, 255, 137, + 255, 128, 174, 176, 255, 159, 165, 170, + 180, 255, 167, 173, 128, 165, 176, 255, + 168, 174, 176, 190, 192, 255, 128, 150, + 160, 166, 168, 174, 176, 182, 184, 190, + 128, 134, 136, 142, 144, 150, 152, 158, + 160, 191, 128, 129, 130, 131, 132, 133, + 134, 135, 144, 145, 255, 133, 135, 161, + 175, 177, 181, 184, 188, 160, 151, 152, + 187, 192, 255, 133, 173, 177, 255, 143, + 159, 187, 255, 176, 191, 182, 183, 184, + 191, 192, 255, 150, 255, 128, 146, 147, + 148, 152, 153, 154, 155, 156, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 129, 255, 141, 255, 144, 189, 141, + 143, 172, 255, 191, 128, 175, 180, 189, + 151, 159, 162, 255, 175, 137, 138, 184, + 255, 183, 255, 168, 255, 128, 179, 188, + 134, 143, 154, 159, 184, 186, 190, 255, + 128, 173, 176, 255, 148, 159, 189, 255, + 129, 142, 154, 159, 191, 255, 128, 182, + 128, 141, 144, 153, 160, 182, 186, 255, + 128, 130, 155, 157, 160, 175, 178, 182, + 129, 134, 137, 142, 145, 150, 160, 166, + 168, 174, 176, 255, 155, 166, 175, 128, + 170, 172, 173, 176, 185, 158, 159, 160, + 255, 164, 175, 135, 138, 188, 255, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 184, 185, 187, 188, 189, 190, + 191, 165, 186, 174, 175, 154, 255, 190, + 128, 134, 147, 151, 157, 168, 170, 182, + 184, 188, 128, 129, 131, 132, 134, 255, + 147, 255, 190, 255, 144, 145, 136, 175, + 188, 255, 128, 143, 160, 175, 179, 180, + 141, 143, 176, 180, 182, 255, 189, 255, + 191, 144, 153, 161, 186, 129, 154, 166, + 255, 191, 255, 130, 135, 138, 143, 146, + 151, 154, 156, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 161, 169, 128, 129, 130, + 131, 133, 135, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 128, 139, 141, + 166, 168, 186, 188, 189, 191, 255, 142, + 143, 158, 255, 
187, 255, 128, 180, 189, + 128, 156, 160, 255, 145, 159, 161, 255, + 128, 159, 176, 255, 139, 143, 187, 255, + 128, 157, 160, 255, 144, 132, 135, 150, + 255, 158, 159, 170, 175, 148, 151, 188, + 255, 128, 167, 176, 255, 164, 255, 183, + 255, 128, 149, 160, 167, 136, 188, 128, + 133, 138, 181, 183, 184, 191, 255, 150, + 159, 183, 255, 128, 158, 160, 178, 180, + 181, 128, 149, 160, 185, 128, 183, 190, + 191, 191, 128, 131, 133, 134, 140, 147, + 149, 151, 153, 179, 184, 186, 160, 188, + 128, 156, 128, 135, 137, 166, 128, 181, + 128, 149, 160, 178, 128, 145, 128, 178, + 129, 130, 131, 132, 133, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 155, 156, 162, 163, + 171, 176, 177, 178, 128, 134, 135, 165, + 176, 190, 144, 168, 176, 185, 128, 180, + 182, 191, 182, 144, 179, 155, 133, 137, + 141, 143, 157, 255, 190, 128, 145, 147, + 183, 136, 128, 134, 138, 141, 143, 157, + 159, 168, 176, 255, 171, 175, 186, 255, + 128, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 144, 151, 128, 132, 135, 136, 139, 141, + 157, 163, 166, 172, 176, 180, 128, 138, + 144, 153, 134, 136, 143, 154, 255, 128, + 181, 184, 255, 129, 151, 158, 255, 129, + 131, 133, 143, 154, 255, 128, 137, 128, + 153, 157, 171, 176, 185, 160, 255, 170, + 190, 192, 255, 128, 184, 128, 136, 138, + 182, 184, 191, 128, 144, 153, 178, 255, + 168, 144, 145, 183, 255, 128, 142, 145, + 149, 129, 141, 144, 146, 147, 148, 175, + 255, 132, 255, 128, 144, 129, 143, 144, + 153, 145, 152, 135, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 161, 167, 185, 255, 128, 158, 160, 169, + 144, 173, 176, 180, 128, 131, 144, 153, + 163, 183, 189, 255, 144, 255, 133, 143, + 191, 255, 143, 159, 160, 128, 129, 255, + 159, 160, 171, 172, 255, 173, 255, 179, + 255, 128, 176, 177, 178, 128, 129, 171, + 175, 189, 255, 128, 136, 144, 153, 157, + 158, 133, 134, 137, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 168, 169, 170, 150, 153, 165, 169, 173, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 139, 140, 191, 255, 134, + 128, 132, 138, 144, 146, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 141, 192, 255, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 160, 163, 164, 165, + 184, 185, 186, 161, 162, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 133, + 143, 151, 255, 139, 143, 154, 255, 164, + 167, 185, 187, 128, 131, 133, 159, 161, + 162, 169, 178, 180, 183, 130, 135, 137, + 139, 148, 151, 153, 155, 157, 159, 164, + 190, 141, 143, 145, 146, 161, 162, 167, + 170, 172, 178, 180, 183, 185, 188, 128, + 137, 139, 155, 161, 163, 165, 169, 171, + 187, 155, 156, 151, 255, 156, 157, 160, + 181, 255, 186, 187, 255, 162, 255, 160, + 168, 161, 167, 158, 255, 160, 132, 135, + 133, 134, 176, 255, 170, 181, 186, 191, + 176, 180, 182, 183, 186, 189, 134, 140, + 136, 138, 142, 161, 163, 255, 130, 137, + 136, 255, 144, 170, 176, 178, 160, 191, + 128, 138, 174, 175, 177, 255, 148, 150, + 164, 167, 173, 176, 185, 189, 190, 192, + 255, 144, 146, 175, 141, 255, 166, 176, + 178, 255, 186, 138, 170, 180, 181, 160, + 161, 162, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 184, 186, 187, + 188, 189, 190, 183, 185, 154, 164, 168, + 128, 149, 128, 152, 189, 132, 185, 144, + 152, 161, 177, 255, 169, 177, 129, 132, + 141, 142, 145, 146, 179, 181, 186, 188, + 190, 255, 142, 156, 157, 159, 161, 176, + 
177, 133, 138, 143, 144, 147, 168, 170, + 176, 178, 179, 181, 182, 184, 185, 158, + 153, 156, 178, 180, 189, 133, 141, 143, + 145, 147, 168, 170, 176, 178, 179, 181, + 185, 144, 185, 160, 161, 189, 133, 140, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 185, 177, 156, 157, 159, 161, 131, + 156, 133, 138, 142, 144, 146, 149, 153, + 154, 158, 159, 163, 164, 168, 170, 174, + 185, 144, 189, 133, 140, 142, 144, 146, + 168, 170, 185, 152, 154, 160, 161, 128, + 189, 133, 140, 142, 144, 146, 168, 170, + 179, 181, 185, 158, 160, 161, 177, 178, + 189, 133, 140, 142, 144, 146, 186, 142, + 148, 150, 159, 161, 186, 191, 189, 133, + 150, 154, 177, 179, 187, 128, 134, 129, + 176, 178, 179, 132, 138, 141, 165, 167, + 189, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 176, 178, + 179, 134, 128, 132, 156, 159, 128, 128, + 135, 137, 172, 136, 140, 128, 129, 130, + 131, 137, 138, 139, 140, 141, 142, 143, + 144, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 184, 188, + 189, 190, 191, 132, 152, 185, 187, 191, + 128, 170, 161, 144, 149, 154, 157, 165, + 166, 174, 176, 181, 255, 130, 141, 143, + 159, 155, 255, 128, 140, 142, 145, 160, + 177, 128, 145, 160, 172, 174, 176, 151, + 156, 170, 128, 168, 176, 255, 138, 255, + 128, 150, 160, 255, 149, 255, 167, 133, + 179, 133, 139, 131, 160, 174, 175, 186, + 255, 166, 255, 128, 163, 141, 143, 154, + 189, 169, 172, 174, 177, 181, 182, 129, + 130, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 177, 191, 165, 170, + 175, 177, 180, 255, 168, 174, 176, 255, + 128, 134, 136, 142, 144, 150, 152, 158, + 128, 129, 130, 131, 132, 133, 134, 135, + 144, 145, 255, 133, 135, 161, 169, 177, + 181, 184, 188, 160, 151, 154, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 255, 141, 143, 160, 169, + 172, 255, 191, 128, 174, 130, 134, 139, + 163, 255, 130, 179, 187, 189, 178, 183, + 138, 165, 176, 255, 135, 159, 189, 255, + 132, 178, 143, 160, 164, 166, 175, 186, + 190, 128, 168, 186, 128, 130, 132, 139, + 160, 182, 190, 255, 176, 178, 180, 183, + 184, 190, 255, 128, 130, 155, 157, 160, + 170, 178, 180, 128, 162, 164, 169, 171, + 172, 173, 174, 175, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 191, 165, + 179, 157, 190, 128, 134, 147, 151, 159, + 168, 170, 182, 184, 188, 176, 180, 182, + 255, 161, 186, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 161, 169, 128, 129, 130, + 131, 133, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 152, 156, + 157, 160, 161, 162, 163, 164, 166, 168, + 169, 170, 171, 172, 173, 174, 176, 177, + 153, 155, 178, 179, 145, 255, 139, 143, + 182, 255, 158, 175, 128, 144, 147, 149, + 151, 153, 179, 128, 135, 137, 164, 128, + 130, 131, 132, 133, 134, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 156, 162, 163, 171, + 176, 177, 178, 131, 183, 131, 175, 144, + 168, 131, 166, 182, 144, 178, 131, 178, + 154, 156, 129, 132, 128, 145, 147, 171, + 159, 255, 144, 157, 161, 135, 138, 128, + 175, 135, 132, 133, 128, 174, 152, 155, + 132, 128, 170, 128, 153, 160, 190, 192, + 255, 128, 136, 138, 174, 128, 178, 255, + 160, 168, 169, 171, 172, 173, 174, 188, + 189, 190, 191, 161, 167, 144, 173, 128, + 131, 163, 183, 189, 255, 133, 143, 145, + 255, 147, 159, 128, 176, 177, 178, 128, + 136, 144, 153, 144, 145, 146, 147, 148, + 149, 154, 155, 156, 157, 
158, 159, 150, + 153, 131, 140, 255, 160, 163, 164, 165, + 184, 185, 186, 161, 162, 133, 255, 170, + 181, 183, 186, 128, 150, 152, 182, 184, + 255, 192, 255, 128, 255, 173, 130, 133, + 146, 159, 165, 171, 175, 255, 181, 190, + 184, 185, 192, 255, 140, 134, 138, 142, + 161, 163, 255, 182, 130, 136, 137, 176, + 151, 152, 154, 160, 190, 136, 144, 192, + 255, 135, 129, 130, 132, 133, 144, 170, + 176, 178, 144, 154, 160, 191, 128, 169, + 174, 255, 148, 169, 157, 158, 189, 190, + 192, 255, 144, 255, 139, 140, 178, 255, + 186, 128, 181, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 128, 173, 128, 155, 160, + 180, 182, 189, 148, 161, 163, 255, 176, + 164, 165, 132, 169, 177, 141, 142, 145, + 146, 179, 181, 186, 187, 158, 133, 134, + 137, 138, 143, 150, 152, 155, 164, 165, + 178, 255, 188, 129, 131, 133, 138, 143, + 144, 147, 168, 170, 176, 178, 179, 181, + 182, 184, 185, 190, 255, 157, 131, 134, + 137, 138, 142, 144, 146, 152, 159, 165, + 182, 255, 129, 131, 133, 141, 143, 145, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 255, 134, 138, 142, 143, 145, 159, + 164, 165, 176, 184, 186, 255, 129, 131, + 133, 140, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 185, 188, 191, 177, 128, + 132, 135, 136, 139, 141, 150, 151, 156, + 157, 159, 163, 166, 175, 156, 130, 131, + 133, 138, 142, 144, 146, 149, 153, 154, + 158, 159, 163, 164, 168, 170, 174, 185, + 190, 191, 144, 151, 128, 130, 134, 136, + 138, 141, 166, 175, 128, 131, 133, 140, + 142, 144, 146, 168, 170, 185, 189, 255, + 133, 137, 151, 142, 148, 155, 159, 164, + 165, 176, 255, 128, 131, 133, 140, 142, + 144, 146, 168, 170, 179, 181, 185, 188, + 191, 158, 128, 132, 134, 136, 138, 141, + 149, 150, 160, 163, 166, 175, 177, 178, + 129, 131, 133, 140, 142, 144, 146, 186, + 189, 255, 133, 137, 143, 147, 152, 158, + 164, 165, 176, 185, 192, 255, 189, 130, + 131, 133, 150, 154, 177, 179, 187, 138, + 150, 128, 134, 143, 148, 152, 159, 166, + 175, 178, 179, 129, 186, 128, 142, 144, + 153, 132, 138, 141, 165, 167, 129, 130, + 135, 136, 148, 151, 153, 159, 161, 163, + 170, 171, 173, 185, 187, 189, 134, 128, + 132, 136, 141, 144, 153, 156, 159, 128, + 181, 183, 185, 152, 153, 160, 169, 190, + 191, 128, 135, 137, 172, 177, 191, 128, + 132, 134, 151, 153, 188, 134, 128, 129, + 130, 131, 137, 138, 139, 140, 141, 142, + 143, 144, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 173, 175, 176, 177, + 178, 179, 181, 182, 183, 188, 189, 190, + 191, 132, 152, 172, 184, 185, 187, 128, + 191, 128, 137, 144, 255, 158, 159, 134, + 187, 136, 140, 142, 143, 137, 151, 153, + 142, 143, 158, 159, 137, 177, 142, 143, + 182, 183, 191, 255, 128, 130, 133, 136, + 150, 152, 255, 145, 150, 151, 155, 156, + 160, 168, 178, 255, 128, 143, 160, 255, + 182, 183, 190, 255, 129, 255, 173, 174, + 192, 255, 129, 154, 160, 255, 171, 173, + 185, 255, 128, 140, 142, 148, 160, 180, + 128, 147, 160, 172, 174, 176, 178, 179, + 148, 150, 152, 155, 158, 159, 170, 255, + 139, 141, 144, 153, 160, 255, 184, 255, + 128, 170, 176, 255, 182, 255, 128, 158, + 160, 171, 176, 187, 134, 173, 176, 180, + 128, 171, 176, 255, 138, 143, 155, 255, + 128, 155, 160, 255, 159, 189, 190, 192, + 255, 167, 128, 137, 144, 153, 176, 189, + 140, 143, 154, 170, 180, 255, 180, 255, + 128, 183, 128, 137, 141, 189, 128, 136, + 144, 146, 148, 182, 184, 185, 128, 181, + 187, 191, 150, 151, 158, 159, 152, 154, + 156, 158, 134, 135, 142, 143, 190, 255, + 190, 128, 
180, 182, 188, 130, 132, 134, + 140, 144, 147, 150, 155, 160, 172, 178, + 180, 182, 188, 128, 129, 130, 131, 132, + 133, 134, 176, 177, 178, 179, 180, 181, + 182, 183, 191, 255, 129, 147, 149, 176, + 178, 190, 192, 255, 144, 156, 161, 144, + 156, 165, 176, 130, 135, 149, 164, 166, + 168, 138, 147, 152, 157, 170, 185, 188, + 191, 142, 133, 137, 160, 255, 137, 255, + 128, 174, 176, 255, 159, 165, 170, 180, + 255, 167, 173, 128, 165, 176, 255, 168, + 174, 176, 190, 192, 255, 128, 150, 160, + 166, 168, 174, 176, 182, 184, 190, 128, + 134, 136, 142, 144, 150, 152, 158, 160, + 191, 128, 129, 130, 131, 132, 133, 134, + 135, 144, 145, 255, 133, 135, 161, 175, + 177, 181, 184, 188, 160, 151, 152, 187, + 192, 255, 133, 173, 177, 255, 143, 159, + 187, 255, 176, 191, 182, 183, 184, 191, + 192, 255, 150, 255, 128, 146, 147, 148, + 152, 153, 154, 155, 156, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 129, 255, 141, 255, 144, 189, 141, 143, + 172, 255, 191, 128, 175, 180, 189, 151, + 159, 162, 255, 175, 137, 138, 184, 255, + 183, 255, 168, 255, 128, 179, 188, 134, + 143, 154, 159, 184, 186, 190, 255, 128, + 173, 176, 255, 148, 159, 189, 255, 129, + 142, 154, 159, 191, 255, 128, 182, 128, + 141, 144, 153, 160, 182, 186, 255, 128, + 130, 155, 157, 160, 175, 178, 182, 129, + 134, 137, 142, 145, 150, 160, 166, 168, + 174, 176, 255, 155, 166, 175, 128, 170, + 172, 173, 176, 185, 158, 159, 160, 255, + 164, 175, 135, 138, 188, 255, 164, 169, + 171, 172, 173, 174, 175, 180, 181, 182, + 183, 184, 185, 187, 188, 189, 190, 191, + 165, 186, 174, 175, 154, 255, 190, 128, + 134, 147, 151, 157, 168, 170, 182, 184, + 188, 128, 129, 131, 132, 134, 255, 147, + 255, 190, 255, 144, 145, 136, 175, 188, + 255, 128, 143, 160, 175, 179, 180, 141, + 143, 176, 180, 182, 255, 189, 255, 191, + 144, 153, 161, 186, 129, 154, 166, 255, + 191, 255, 130, 135, 138, 143, 146, 151, + 154, 156, 144, 145, 146, 147, 148, 150, + 151, 152, 155, 157, 158, 160, 170, 171, + 172, 175, 161, 169, 128, 129, 130, 131, + 133, 135, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 152, 156, + 157, 160, 161, 162, 163, 164, 166, 168, + 169, 170, 171, 172, 173, 174, 176, 177, + 153, 155, 178, 179, 128, 139, 141, 166, + 168, 186, 188, 189, 191, 255, 142, 143, + 158, 255, 187, 255, 128, 180, 189, 128, + 156, 160, 255, 145, 159, 161, 255, 128, + 159, 176, 255, 139, 143, 187, 255, 128, + 157, 160, 255, 144, 132, 135, 150, 255, + 158, 159, 170, 175, 148, 151, 188, 255, + 128, 167, 176, 255, 164, 255, 183, 255, + 128, 149, 160, 167, 136, 188, 128, 133, + 138, 181, 183, 184, 191, 255, 150, 159, + 183, 255, 128, 158, 160, 178, 180, 181, + 128, 149, 160, 185, 128, 183, 190, 191, + 191, 128, 131, 133, 134, 140, 147, 149, + 151, 153, 179, 184, 186, 160, 188, 128, + 156, 128, 135, 137, 166, 128, 181, 128, + 149, 160, 178, 128, 145, 128, 178, 129, + 130, 131, 132, 133, 135, 136, 138, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 155, 156, 162, 163, 171, + 176, 177, 178, 128, 134, 135, 165, 176, + 190, 144, 168, 176, 185, 128, 180, 182, + 191, 182, 144, 179, 155, 133, 137, 141, + 143, 157, 255, 190, 128, 145, 147, 183, + 136, 128, 134, 138, 141, 143, 157, 159, + 168, 176, 255, 171, 175, 186, 255, 128, + 131, 133, 140, 143, 144, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 191, 144, + 151, 128, 132, 135, 136, 139, 141, 157, + 163, 166, 172, 176, 180, 128, 138, 144, + 153, 134, 136, 143, 154, 255, 128, 181, + 184, 255, 129, 151, 158, 255, 129, 131, + 133, 143, 154, 255, 128, 137, 128, 
153, + 157, 171, 176, 185, 160, 255, 170, 190, + 192, 255, 128, 184, 128, 136, 138, 182, + 184, 191, 128, 144, 153, 178, 255, 168, + 144, 145, 183, 255, 128, 142, 145, 149, + 129, 141, 144, 146, 147, 148, 175, 255, + 132, 255, 128, 144, 129, 143, 144, 153, + 145, 152, 135, 255, 160, 168, 169, 171, + 172, 173, 174, 188, 189, 190, 191, 161, + 167, 185, 255, 128, 158, 160, 169, 144, + 173, 176, 180, 128, 131, 144, 153, 163, + 183, 189, 255, 144, 255, 133, 143, 191, + 255, 143, 159, 160, 128, 129, 255, 159, + 160, 171, 172, 255, 173, 255, 179, 255, + 128, 176, 177, 178, 128, 129, 171, 175, + 189, 255, 128, 136, 144, 153, 157, 158, + 133, 134, 137, 144, 145, 146, 147, 148, + 149, 154, 155, 156, 157, 158, 159, 168, + 169, 170, 150, 153, 165, 169, 173, 178, + 187, 255, 131, 132, 140, 169, 174, 255, + 130, 132, 149, 157, 173, 186, 188, 160, + 161, 163, 164, 167, 168, 132, 134, 149, + 157, 186, 139, 140, 191, 255, 134, 128, + 132, 138, 144, 146, 255, 166, 167, 129, + 155, 187, 149, 181, 143, 175, 137, 169, + 131, 140, 141, 192, 255, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 160, 163, 164, 165, 184, + 185, 186, 161, 162, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 133, 143, + 151, 255, 139, 143, 154, 255, 164, 167, + 185, 187, 128, 131, 133, 159, 161, 162, + 169, 178, 180, 183, 130, 135, 137, 139, + 148, 151, 153, 155, 157, 159, 164, 190, + 141, 143, 145, 146, 161, 162, 167, 170, + 172, 178, 180, 183, 185, 188, 128, 137, + 139, 155, 161, 163, 165, 169, 171, 187, + 155, 156, 151, 255, 156, 157, 160, 181, + 255, 186, 187, 255, 162, 255, 160, 168, + 161, 167, 158, 255, 160, 132, 135, 133, + 134, 176, 255, 128, 191, 154, 164, 168, + 128, 149, 150, 191, 128, 152, 153, 191, + 181, 128, 159, 160, 189, 190, 191, 189, + 128, 131, 132, 185, 186, 191, 144, 128, + 151, 152, 161, 162, 176, 177, 255, 169, + 177, 129, 132, 141, 142, 145, 146, 179, + 181, 186, 188, 190, 191, 192, 255, 142, + 158, 128, 155, 156, 161, 162, 175, 176, + 177, 178, 191, 169, 177, 180, 183, 128, + 132, 133, 138, 139, 142, 143, 144, 145, + 146, 147, 185, 186, 191, 157, 128, 152, + 153, 158, 159, 177, 178, 180, 181, 191, + 142, 146, 169, 177, 180, 189, 128, 132, + 133, 185, 186, 191, 144, 185, 128, 159, + 160, 161, 162, 191, 169, 177, 180, 189, + 128, 132, 133, 140, 141, 142, 143, 144, + 145, 146, 147, 185, 186, 191, 158, 177, + 128, 155, 156, 161, 162, 191, 131, 145, + 155, 157, 128, 132, 133, 138, 139, 141, + 142, 149, 150, 152, 153, 159, 160, 162, + 163, 164, 165, 167, 168, 170, 171, 173, + 174, 185, 186, 191, 144, 128, 191, 141, + 145, 169, 189, 128, 132, 133, 185, 186, + 191, 128, 151, 152, 154, 155, 159, 160, + 161, 162, 191, 128, 141, 145, 169, 180, + 189, 129, 132, 133, 185, 186, 191, 158, + 128, 159, 160, 161, 162, 176, 177, 178, + 179, 191, 141, 145, 189, 128, 132, 133, + 186, 187, 191, 142, 128, 147, 148, 150, + 151, 158, 159, 161, 162, 185, 186, 191, + 178, 188, 128, 132, 133, 150, 151, 153, + 154, 189, 190, 191, 128, 134, 135, 191, + 128, 177, 129, 179, 180, 191, 128, 131, + 137, 141, 152, 160, 164, 166, 172, 177, + 189, 129, 132, 133, 134, 135, 138, 139, + 147, 148, 167, 168, 169, 170, 179, 180, + 191, 133, 128, 134, 135, 155, 156, 159, + 160, 191, 128, 129, 191, 136, 128, 172, + 173, 191, 128, 135, 136, 140, 141, 191, + 191, 128, 170, 171, 190, 161, 128, 143, + 144, 149, 150, 153, 154, 157, 158, 164, + 165, 166, 167, 173, 174, 176, 177, 180, + 181, 255, 130, 141, 143, 159, 134, 187, + 136, 140, 142, 143, 137, 151, 153, 142, + 143, 158, 159, 137, 177, 191, 142, 143, + 182, 183, 192, 255, 
129, 151, 128, 133, + 134, 135, 136, 255, 145, 150, 151, 155, + 191, 192, 255, 128, 143, 144, 159, 160, + 255, 182, 183, 190, 191, 192, 255, 128, + 129, 255, 173, 174, 192, 255, 128, 129, + 154, 155, 159, 160, 255, 171, 173, 185, + 191, 192, 255, 141, 128, 145, 146, 159, + 160, 177, 178, 191, 173, 128, 145, 146, + 159, 160, 176, 177, 191, 128, 179, 180, + 191, 151, 156, 128, 191, 128, 159, 160, + 255, 184, 191, 192, 255, 169, 128, 170, + 171, 175, 176, 255, 182, 191, 192, 255, + 128, 158, 159, 191, 128, 143, 144, 173, + 174, 175, 176, 180, 181, 191, 128, 171, + 172, 175, 176, 255, 138, 191, 192, 255, + 128, 150, 151, 159, 160, 255, 149, 191, + 192, 255, 167, 128, 191, 128, 132, 133, + 179, 180, 191, 128, 132, 133, 139, 140, + 191, 128, 130, 131, 160, 161, 173, 174, + 175, 176, 185, 186, 255, 166, 191, 192, + 255, 128, 163, 164, 191, 128, 140, 141, + 143, 144, 153, 154, 189, 190, 191, 128, + 136, 137, 191, 173, 128, 168, 169, 177, + 178, 180, 181, 182, 183, 191, 0, 127, + 192, 255, 150, 151, 158, 159, 152, 154, + 156, 158, 134, 135, 142, 143, 190, 191, + 192, 255, 181, 189, 191, 128, 190, 133, + 181, 128, 129, 130, 140, 141, 143, 144, + 147, 148, 149, 150, 155, 156, 159, 160, + 172, 173, 177, 178, 188, 189, 191, 177, + 191, 128, 190, 128, 143, 144, 156, 157, + 191, 130, 135, 148, 164, 166, 168, 128, + 137, 138, 149, 150, 151, 152, 157, 158, + 169, 170, 185, 186, 187, 188, 191, 142, + 128, 132, 133, 137, 138, 159, 160, 255, + 137, 191, 192, 255, 175, 128, 255, 159, + 165, 170, 175, 177, 180, 191, 192, 255, + 166, 173, 128, 167, 168, 175, 176, 255, + 168, 174, 176, 191, 192, 255, 167, 175, + 183, 191, 128, 150, 151, 159, 160, 190, + 135, 143, 151, 128, 158, 159, 191, 128, + 132, 133, 135, 136, 160, 161, 169, 170, + 176, 177, 181, 182, 183, 184, 188, 189, + 191, 160, 151, 154, 187, 192, 255, 128, + 132, 133, 173, 174, 176, 177, 255, 143, + 159, 187, 191, 192, 255, 128, 175, 176, + 191, 150, 191, 192, 255, 141, 191, 192, + 255, 128, 143, 144, 189, 190, 191, 141, + 143, 160, 169, 172, 191, 192, 255, 191, + 128, 174, 175, 190, 128, 157, 158, 159, + 160, 255, 176, 191, 192, 255, 128, 150, + 151, 159, 160, 161, 162, 255, 175, 137, + 138, 184, 191, 192, 255, 128, 182, 183, + 255, 130, 134, 139, 163, 191, 192, 255, + 128, 129, 130, 179, 180, 191, 187, 189, + 128, 177, 178, 183, 184, 191, 128, 137, + 138, 165, 166, 175, 176, 255, 135, 159, + 189, 191, 192, 255, 128, 131, 132, 178, + 179, 191, 143, 165, 191, 128, 159, 160, + 175, 176, 185, 186, 190, 128, 168, 169, + 191, 131, 186, 128, 139, 140, 159, 160, + 182, 183, 189, 190, 255, 176, 178, 180, + 183, 184, 190, 191, 192, 255, 129, 128, + 130, 131, 154, 155, 157, 158, 159, 160, + 170, 171, 177, 178, 180, 181, 191, 128, + 167, 175, 129, 134, 135, 136, 137, 142, + 143, 144, 145, 150, 151, 159, 160, 255, + 155, 166, 175, 128, 162, 163, 191, 164, + 175, 135, 138, 188, 191, 192, 255, 174, + 175, 154, 191, 192, 255, 157, 169, 183, + 189, 191, 128, 134, 135, 146, 147, 151, + 152, 158, 159, 190, 130, 133, 128, 255, + 178, 191, 192, 255, 128, 146, 147, 255, + 190, 191, 192, 255, 128, 143, 144, 255, + 144, 145, 136, 175, 188, 191, 192, 255, + 181, 128, 175, 176, 255, 189, 191, 192, + 255, 128, 160, 161, 186, 187, 191, 128, + 129, 154, 155, 165, 166, 255, 191, 192, + 255, 128, 129, 130, 135, 136, 137, 138, + 143, 144, 145, 146, 151, 152, 153, 154, + 156, 157, 191, 128, 191, 128, 129, 130, + 131, 133, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 152, 156, + 157, 160, 161, 162, 163, 164, 166, 168, + 169, 170, 171, 172, 173, 174, 176, 177, + 132, 
151, 153, 155, 158, 175, 178, 179, + 180, 191, 140, 167, 187, 190, 128, 255, + 142, 143, 158, 191, 192, 255, 187, 191, + 192, 255, 128, 180, 181, 191, 128, 156, + 157, 159, 160, 255, 145, 191, 192, 255, + 128, 159, 160, 175, 176, 255, 139, 143, + 182, 191, 192, 255, 144, 132, 135, 150, + 191, 192, 255, 158, 175, 148, 151, 188, + 191, 192, 255, 128, 167, 168, 175, 176, + 255, 164, 191, 192, 255, 183, 191, 192, + 255, 128, 149, 150, 159, 160, 167, 168, + 191, 136, 182, 188, 128, 133, 134, 137, + 138, 184, 185, 190, 191, 255, 150, 159, + 183, 191, 192, 255, 179, 128, 159, 160, + 181, 182, 191, 128, 149, 150, 159, 160, + 185, 186, 191, 128, 183, 184, 189, 190, + 191, 128, 148, 152, 129, 143, 144, 179, + 180, 191, 128, 159, 160, 188, 189, 191, + 128, 156, 157, 191, 136, 128, 164, 165, + 191, 128, 181, 182, 191, 128, 149, 150, + 159, 160, 178, 179, 191, 128, 145, 146, + 191, 128, 178, 179, 191, 128, 130, 131, + 132, 133, 134, 135, 136, 138, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 162, 163, 171, 176, 177, + 178, 129, 191, 128, 130, 131, 183, 184, + 191, 128, 130, 131, 175, 176, 191, 128, + 143, 144, 168, 169, 191, 128, 130, 131, + 166, 167, 191, 182, 128, 143, 144, 178, + 179, 191, 128, 130, 131, 178, 179, 191, + 128, 154, 156, 129, 132, 133, 191, 146, + 128, 171, 172, 191, 135, 137, 142, 158, + 128, 168, 169, 175, 176, 255, 159, 191, + 192, 255, 144, 128, 156, 157, 161, 162, + 191, 128, 134, 135, 138, 139, 191, 128, + 175, 176, 191, 134, 128, 131, 132, 135, + 136, 191, 128, 174, 175, 191, 128, 151, + 152, 155, 156, 191, 132, 128, 191, 128, + 170, 171, 191, 128, 153, 154, 191, 160, + 190, 192, 255, 128, 184, 185, 191, 137, + 128, 174, 175, 191, 128, 129, 177, 178, + 255, 144, 191, 192, 255, 128, 142, 143, + 144, 145, 146, 149, 129, 148, 150, 191, + 175, 191, 192, 255, 132, 191, 192, 255, + 128, 144, 129, 143, 145, 191, 144, 153, + 128, 143, 145, 152, 154, 191, 135, 191, + 192, 255, 160, 168, 169, 171, 172, 173, + 174, 188, 189, 190, 191, 128, 159, 161, + 167, 170, 187, 185, 191, 192, 255, 128, + 143, 144, 173, 174, 191, 128, 131, 132, + 162, 163, 183, 184, 188, 189, 255, 133, + 143, 145, 191, 192, 255, 128, 146, 147, + 159, 160, 191, 160, 128, 191, 128, 129, + 191, 192, 255, 159, 160, 171, 128, 170, + 172, 191, 192, 255, 173, 191, 192, 255, + 179, 191, 192, 255, 128, 176, 177, 178, + 129, 191, 128, 129, 130, 191, 171, 175, + 189, 191, 192, 255, 128, 136, 137, 143, + 144, 153, 154, 191, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 128, 143, 150, 153, 160, 191, 149, 157, + 173, 186, 188, 160, 161, 163, 164, 167, + 168, 132, 134, 149, 157, 186, 191, 139, + 140, 192, 255, 133, 145, 128, 134, 135, + 137, 138, 255, 166, 167, 129, 155, 187, + 149, 181, 143, 175, 137, 169, 131, 140, + 191, 192, 255, 160, 163, 164, 165, 184, + 185, 186, 128, 159, 161, 162, 166, 191, + 133, 191, 192, 255, 132, 160, 163, 167, + 179, 184, 186, 128, 164, 165, 168, 169, + 187, 188, 191, 130, 135, 137, 139, 144, + 147, 151, 153, 155, 157, 159, 163, 171, + 179, 184, 189, 191, 128, 140, 141, 148, + 149, 160, 161, 164, 165, 166, 167, 190, + 138, 164, 170, 128, 155, 156, 160, 161, + 187, 188, 191, 128, 191, 155, 156, 128, + 191, 151, 191, 192, 255, 156, 157, 160, + 128, 191, 181, 191, 192, 255, 158, 159, + 186, 128, 185, 187, 191, 192, 255, 162, + 191, 192, 255, 160, 168, 128, 159, 161, + 167, 169, 191, 158, 191, 192, 255, 123, + 128, 191, 128, 191, 128, 191, 128, 191, + 128, 191, 10, 123, 128, 191, 128, 191, + 128, 191, 123, 123, 10, 123, 128, 191, + 128, 191, 128, 191, 123, 123, 9, 
10, + 13, 32, 33, 34, 35, 38, 46, 47, + 60, 61, 62, 64, 92, 95, 123, 124, + 125, 126, 127, 194, 195, 198, 199, 203, + 204, 205, 206, 207, 210, 212, 213, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 233, 234, 237, + 238, 239, 240, 0, 39, 40, 45, 48, + 57, 58, 63, 65, 90, 91, 96, 97, + 122, 192, 193, 196, 218, 229, 236, 241, + 247, 9, 10, 32, 61, 10, 38, 46, + 42, 47, 42, 46, 69, 101, 48, 57, + 60, 61, 61, 62, 61, 45, 95, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 239, 240, 243, 48, + 57, 65, 90, 97, 122, 196, 218, 229, + 236, 124, 125, 128, 191, 170, 181, 186, + 128, 191, 151, 183, 128, 255, 192, 255, + 0, 127, 173, 130, 133, 146, 159, 165, + 171, 175, 191, 192, 255, 181, 190, 128, + 175, 176, 183, 184, 185, 186, 191, 134, + 139, 141, 162, 128, 135, 136, 255, 182, + 130, 137, 176, 151, 152, 154, 160, 136, + 191, 192, 255, 128, 143, 144, 170, 171, + 175, 176, 178, 179, 191, 128, 159, 160, + 191, 176, 128, 138, 139, 173, 174, 255, + 148, 150, 164, 167, 173, 176, 185, 189, + 190, 192, 255, 144, 128, 145, 146, 175, + 176, 191, 128, 140, 141, 255, 166, 176, + 178, 191, 192, 255, 186, 128, 137, 138, + 170, 171, 179, 180, 181, 182, 191, 160, + 161, 162, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 128, 191, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 182, + 183, 184, 188, 189, 190, 191, 132, 187, + 129, 130, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 128, 191, 128, + 129, 130, 131, 132, 133, 134, 135, 144, + 136, 143, 145, 191, 192, 255, 182, 183, + 184, 128, 191, 128, 191, 191, 128, 190, + 192, 255, 128, 146, 147, 148, 152, 153, + 154, 155, 156, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 129, 191, + 192, 255, 158, 159, 128, 157, 160, 191, + 192, 255, 128, 191, 164, 169, 171, 172, + 173, 174, 175, 180, 181, 182, 183, 184, + 185, 187, 188, 189, 190, 191, 128, 163, + 165, 186, 144, 145, 146, 147, 148, 150, + 151, 152, 155, 157, 158, 160, 170, 171, + 172, 175, 128, 159, 161, 169, 173, 191, + 128, 191, 10, 13, 34, 36, 37, 92, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 34, 92, 36, 37, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 92, 36, 37, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 92, 36, 37, 192, 223, 224, 239, + 240, 247, 10, 13, 34, 92, 36, 37, + 192, 223, 224, 239, 240, 247, 248, 255, + 10, 13, 34, 92, 36, 37, 128, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 92, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 123, 126, + 123, 126, 128, 191, 128, 191, 128, 191, + 10, 13, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 36, 37, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 126, 126, 128, 191, + 128, 191, 128, 191, 10, 13, 36, 37, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 128, 191, + 192, 223, 224, 239, 240, 247, 248, 255, + 126, 126, 128, 191, 128, 191, 128, 191, +} + +var _zcltok_single_lengths []byte = []byte{ + 0, 1, 1, 1, 2, 3, 2, 0, + 31, 30, 36, 1, 4, 0, 0, 0, + 0, 1, 2, 1, 1, 1, 1, 0, + 1, 1, 0, 0, 2, 0, 0, 0, + 1, 32, 0, 0, 0, 0, 1, 3, + 1, 1, 1, 0, 2, 0, 1, 1, + 2, 0, 3, 0, 1, 0, 2, 1, + 2, 0, 0, 5, 1, 4, 0, 
0, + 1, 43, 0, 0, 0, 2, 3, 2, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 1, 0, 15, 0, 0, 0, 1, 6, + 1, 0, 0, 1, 0, 2, 0, 0, + 0, 9, 0, 1, 1, 0, 0, 0, + 3, 0, 1, 0, 28, 0, 0, 0, + 1, 0, 1, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 2, 0, 0, 18, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 16, 36, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 28, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 2, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 4, 0, 0, 2, + 2, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 3, 0, 0, 4, + 0, 0, 0, 18, 0, 0, 0, 1, + 4, 1, 4, 1, 0, 3, 2, 2, + 2, 1, 0, 0, 1, 8, 0, 0, + 0, 4, 12, 0, 2, 0, 3, 0, + 1, 0, 2, 0, 1, 2, 0, 3, + 1, 2, 0, 0, 0, 0, 0, 1, + 1, 0, 0, 1, 28, 3, 0, 1, + 1, 2, 1, 0, 1, 1, 2, 1, + 1, 2, 1, 1, 0, 2, 1, 1, + 1, 1, 0, 0, 6, 1, 1, 0, + 0, 46, 1, 1, 0, 0, 0, 0, + 2, 1, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 13, 2, 0, + 0, 0, 9, 0, 1, 28, 0, 1, + 3, 0, 2, 0, 0, 0, 1, 0, + 1, 1, 2, 0, 18, 2, 0, 0, + 16, 35, 0, 0, 0, 1, 0, 28, + 0, 0, 0, 0, 1, 0, 2, 0, + 0, 1, 0, 0, 1, 0, 0, 1, + 0, 0, 0, 0, 1, 11, 0, 0, + 0, 0, 4, 0, 12, 1, 7, 0, + 4, 0, 0, 0, 0, 1, 2, 1, + 1, 1, 1, 0, 1, 1, 0, 0, + 2, 0, 0, 0, 1, 32, 0, 0, + 0, 0, 1, 3, 1, 1, 1, 0, + 2, 0, 1, 1, 2, 0, 3, 0, + 1, 0, 2, 1, 2, 0, 0, 5, + 1, 4, 0, 0, 1, 43, 0, 0, + 0, 2, 3, 2, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 1, 0, 15, 0, + 0, 0, 1, 6, 1, 0, 0, 1, + 0, 2, 0, 0, 0, 9, 0, 1, + 1, 0, 0, 0, 3, 0, 1, 0, + 28, 0, 0, 0, 1, 0, 1, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 2, 0, 0, 18, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 16, + 36, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 2, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 28, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 2, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, + 4, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 1, 1, + 3, 0, 0, 4, 0, 0, 0, 18, + 0, 0, 0, 1, 4, 1, 4, 1, + 0, 3, 2, 2, 2, 1, 0, 0, + 1, 8, 0, 0, 0, 4, 12, 0, + 2, 0, 3, 0, 1, 0, 2, 0, + 1, 2, 0, 0, 3, 0, 1, 1, + 1, 2, 2, 4, 1, 6, 2, 4, + 2, 4, 1, 4, 0, 6, 1, 3, + 1, 2, 0, 2, 11, 1, 1, 1, + 0, 1, 1, 0, 2, 0, 3, 3, + 2, 1, 0, 0, 0, 1, 0, 1, + 0, 1, 1, 0, 2, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 4, 3, 2, 2, 0, + 6, 1, 0, 1, 1, 0, 2, 0, + 4, 3, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 1, 0, 3, 0, 2, 0, 0, 0, + 3, 0, 2, 1, 1, 3, 1, 0, + 0, 0, 0, 0, 5, 2, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 1, + 1, 0, 0, 35, 4, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 3, 0, 1, 0, 0, + 3, 0, 0, 1, 0, 0, 0, 0, + 28, 0, 0, 0, 0, 1, 0, 3, + 1, 4, 0, 1, 0, 0, 1, 0, + 0, 1, 0, 0, 0, 0, 1, 1, + 0, 7, 0, 0, 2, 2, 0, 11, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 12, 1, + 4, 1, 5, 2, 0, 3, 2, 2, + 2, 1, 7, 0, 7, 17, 3, 0, + 2, 0, 3, 0, 0, 1, 0, 2, + 0, 1, 0, 0, 0, 0, 0, 1, + 1, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 1, 1, 53, 1, 1, + 1, 1, 1, 1, 1, 2, 1, 3, + 2, 2, 1, 34, 1, 1, 0, 3, + 2, 0, 0, 0, 1, 2, 4, 1, + 0, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 30, 47, 13, 9, + 3, 0, 1, 28, 2, 0, 18, 16, + 0, 6, 4, 3, 1, 4, 4, 4, + 1, 1, 1, 1, 0, 0, 0, 4, + 2, 1, 1, 0, 0, 0, 4, 2, + 1, 1, 0, 0, 0, +} + +var _zcltok_range_lengths []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 5, 5, 5, 0, 0, 3, 0, 1, + 1, 4, 2, 3, 0, 1, 0, 2, + 2, 4, 2, 2, 3, 1, 1, 1, + 1, 0, 1, 1, 2, 2, 1, 4, + 6, 9, 6, 8, 5, 8, 7, 10, + 4, 6, 4, 7, 7, 5, 5, 4, + 5, 1, 2, 8, 4, 3, 3, 3, + 0, 3, 1, 2, 1, 2, 2, 3, + 3, 1, 3, 2, 2, 1, 2, 2, + 2, 3, 4, 4, 3, 1, 2, 1, + 3, 2, 2, 2, 2, 2, 3, 3, + 1, 1, 
2, 1, 3, 2, 2, 3, + 2, 7, 0, 1, 4, 1, 2, 4, + 2, 1, 2, 0, 2, 2, 3, 5, + 5, 1, 4, 1, 1, 2, 2, 1, + 0, 0, 1, 1, 1, 1, 1, 2, + 2, 2, 2, 1, 1, 1, 4, 2, + 2, 3, 1, 4, 4, 6, 1, 3, + 1, 1, 2, 1, 1, 1, 5, 3, + 1, 1, 1, 2, 3, 3, 1, 2, + 2, 1, 4, 1, 2, 5, 2, 1, + 1, 0, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 1, 1, 2, 4, 2, + 1, 2, 2, 2, 6, 1, 1, 2, + 1, 2, 1, 1, 1, 2, 2, 2, + 1, 3, 2, 5, 2, 8, 6, 2, + 2, 2, 2, 3, 1, 3, 1, 2, + 1, 3, 2, 2, 3, 1, 1, 1, + 1, 1, 1, 1, 2, 2, 4, 1, + 2, 1, 0, 1, 1, 1, 1, 0, + 1, 2, 3, 1, 3, 3, 1, 0, + 3, 0, 2, 3, 1, 0, 0, 0, + 0, 2, 2, 2, 2, 1, 5, 2, + 2, 5, 7, 5, 0, 1, 0, 1, + 1, 1, 1, 1, 0, 1, 1, 0, + 3, 3, 1, 1, 2, 1, 3, 5, + 1, 1, 2, 2, 1, 1, 1, 1, + 2, 6, 3, 7, 2, 6, 1, 6, + 2, 8, 0, 4, 2, 5, 2, 3, + 3, 3, 1, 2, 8, 2, 0, 2, + 1, 2, 1, 5, 2, 1, 3, 3, + 0, 2, 1, 2, 1, 0, 1, 1, + 3, 1, 1, 2, 3, 0, 0, 3, + 2, 4, 1, 4, 1, 1, 3, 1, + 1, 1, 1, 2, 2, 1, 3, 1, + 4, 3, 3, 1, 1, 5, 2, 1, + 1, 2, 1, 2, 1, 3, 2, 0, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 0, + 1, 1, 2, 2, 1, 1, 1, 3, + 2, 1, 0, 2, 1, 1, 1, 1, + 0, 3, 0, 1, 1, 4, 2, 3, + 0, 1, 0, 2, 2, 4, 2, 2, + 3, 1, 1, 1, 1, 0, 1, 1, + 2, 2, 1, 4, 6, 9, 6, 8, + 5, 8, 7, 10, 4, 6, 4, 7, + 7, 5, 5, 4, 5, 1, 2, 8, + 4, 3, 3, 3, 0, 3, 1, 2, + 1, 2, 2, 3, 3, 1, 3, 2, + 2, 1, 2, 2, 2, 3, 4, 4, + 3, 1, 2, 1, 3, 2, 2, 2, + 2, 2, 3, 3, 1, 1, 2, 1, + 3, 2, 2, 3, 2, 7, 0, 1, + 4, 1, 2, 4, 2, 1, 2, 0, + 2, 2, 3, 5, 5, 1, 4, 1, + 1, 2, 2, 1, 0, 0, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 1, + 1, 1, 4, 2, 2, 3, 1, 4, + 4, 6, 1, 3, 1, 1, 2, 1, + 1, 1, 5, 3, 1, 1, 1, 2, + 3, 3, 1, 2, 2, 1, 4, 1, + 2, 5, 2, 1, 1, 0, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 1, + 1, 2, 4, 2, 1, 2, 2, 2, + 6, 1, 1, 2, 1, 2, 1, 1, + 1, 2, 2, 2, 1, 3, 2, 5, + 2, 8, 6, 2, 2, 2, 2, 3, + 1, 3, 1, 2, 1, 3, 2, 2, + 3, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 4, 1, 2, 1, 0, 1, + 1, 1, 1, 0, 1, 2, 3, 1, + 3, 3, 1, 0, 3, 0, 2, 3, + 1, 0, 0, 0, 0, 2, 2, 2, + 2, 1, 5, 2, 2, 5, 7, 5, + 0, 1, 0, 1, 1, 1, 1, 1, + 0, 1, 1, 1, 2, 2, 3, 3, + 4, 7, 5, 7, 5, 3, 3, 7, + 3, 13, 1, 3, 5, 3, 5, 3, + 6, 5, 2, 2, 8, 4, 1, 2, + 3, 2, 10, 2, 2, 0, 2, 3, + 3, 1, 2, 3, 3, 1, 2, 3, + 3, 4, 4, 2, 1, 2, 2, 3, + 2, 2, 5, 3, 2, 3, 2, 1, + 3, 3, 6, 2, 2, 5, 2, 5, + 1, 1, 2, 4, 1, 11, 1, 3, + 8, 4, 2, 1, 0, 4, 3, 3, + 3, 2, 9, 1, 1, 4, 3, 2, + 2, 2, 3, 4, 2, 3, 2, 4, + 3, 2, 2, 3, 3, 4, 3, 3, + 4, 2, 5, 4, 8, 7, 1, 2, + 1, 3, 1, 2, 5, 1, 2, 2, + 2, 2, 1, 3, 2, 2, 3, 3, + 1, 9, 1, 5, 1, 3, 2, 2, + 3, 2, 3, 3, 3, 1, 3, 3, + 2, 2, 4, 5, 3, 3, 4, 3, + 3, 3, 2, 2, 2, 4, 2, 2, + 1, 3, 3, 3, 3, 3, 3, 2, + 2, 3, 2, 3, 3, 2, 3, 2, + 3, 1, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 2, 3, + 2, 3, 5, 3, 3, 1, 2, 3, + 2, 2, 1, 2, 3, 4, 3, 0, + 3, 0, 2, 3, 1, 0, 0, 0, + 0, 2, 3, 2, 4, 6, 4, 1, + 1, 2, 1, 2, 1, 3, 2, 3, + 2, 0, 1, 1, 1, 1, 1, 0, + 0, 1, 1, 1, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 5, 0, 0, 1, 1, + 1, 0, 1, 1, 5, 4, 2, 0, + 1, 0, 2, 2, 5, 2, 3, 5, + 3, 2, 3, 5, 1, 1, 1, 3, + 1, 1, 2, 2, 3, 1, 2, 3, + 1, 5, 6, 6, 4, 5, 5, 6, + 0, 0, 0, 0, 1, 1, 1, 5, + 6, 0, 0, 1, 1, 1, 5, 6, + 0, 0, 1, 1, 1, +} + +var _zcltok_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 9, 14, 18, + 20, 57, 93, 135, 137, 142, 146, 147, + 149, 151, 157, 162, 167, 169, 172, 174, + 177, 181, 187, 190, 193, 199, 201, 203, + 205, 208, 241, 243, 245, 248, 251, 254, + 262, 270, 281, 289, 298, 306, 315, 324, + 336, 343, 350, 358, 366, 375, 381, 389, + 395, 403, 405, 408, 422, 428, 436, 440, + 444, 446, 493, 495, 498, 500, 505, 511, + 517, 522, 525, 529, 532, 535, 537, 540, + 543, 546, 550, 555, 560, 564, 566, 569, + 571, 575, 578, 581, 584, 
587, 591, 596, + 600, 602, 604, 607, 609, 613, 616, 619, + 627, 631, 639, 655, 657, 662, 664, 668, + 679, 683, 685, 688, 690, 693, 698, 702, + 708, 714, 725, 730, 733, 736, 739, 742, + 744, 748, 749, 752, 754, 784, 786, 788, + 791, 795, 798, 802, 804, 806, 808, 814, + 817, 820, 824, 826, 831, 836, 843, 846, + 850, 854, 856, 859, 879, 881, 883, 890, + 894, 896, 898, 900, 903, 907, 911, 913, + 917, 920, 922, 927, 945, 984, 990, 993, + 995, 997, 999, 1002, 1005, 1008, 1011, 1014, + 1018, 1021, 1024, 1027, 1029, 1031, 1034, 1041, + 1044, 1046, 1049, 1052, 1055, 1063, 1065, 1067, + 1070, 1072, 1075, 1077, 1079, 1109, 1112, 1115, + 1118, 1121, 1126, 1130, 1137, 1140, 1149, 1158, + 1161, 1165, 1168, 1171, 1175, 1177, 1181, 1183, + 1186, 1188, 1192, 1196, 1200, 1208, 1210, 1212, + 1216, 1220, 1222, 1235, 1237, 1240, 1243, 1248, + 1250, 1253, 1255, 1257, 1260, 1265, 1267, 1269, + 1274, 1276, 1279, 1283, 1303, 1307, 1311, 1313, + 1315, 1323, 1325, 1332, 1337, 1339, 1343, 1346, + 1349, 1352, 1356, 1359, 1362, 1366, 1376, 1382, + 1385, 1388, 1398, 1418, 1424, 1427, 1429, 1433, + 1435, 1438, 1440, 1444, 1446, 1448, 1452, 1454, + 1458, 1463, 1469, 1471, 1473, 1476, 1478, 1482, + 1489, 1492, 1494, 1497, 1501, 1531, 1536, 1538, + 1541, 1545, 1554, 1559, 1567, 1571, 1579, 1583, + 1591, 1595, 1606, 1608, 1614, 1617, 1625, 1629, + 1634, 1639, 1644, 1646, 1649, 1664, 1668, 1670, + 1673, 1675, 1724, 1727, 1734, 1737, 1739, 1743, + 1747, 1750, 1754, 1756, 1759, 1761, 1763, 1765, + 1767, 1771, 1773, 1775, 1778, 1782, 1796, 1799, + 1803, 1806, 1811, 1822, 1827, 1830, 1860, 1864, + 1867, 1872, 1874, 1878, 1881, 1884, 1886, 1891, + 1893, 1899, 1904, 1910, 1912, 1932, 1940, 1943, + 1945, 1963, 2001, 2003, 2006, 2008, 2013, 2016, + 2045, 2047, 2049, 2051, 2053, 2056, 2058, 2062, + 2065, 2067, 2070, 2072, 2074, 2077, 2079, 2081, + 2083, 2085, 2087, 2090, 2093, 2096, 2109, 2111, + 2115, 2118, 2120, 2125, 2128, 2142, 2145, 2154, + 2156, 2161, 2165, 2166, 2168, 2170, 2176, 2181, + 2186, 2188, 2191, 2193, 2196, 2200, 2206, 2209, + 2212, 2218, 2220, 2222, 2224, 2227, 2260, 2262, + 2264, 2267, 2270, 2273, 2281, 2289, 2300, 2308, + 2317, 2325, 2334, 2343, 2355, 2362, 2369, 2377, + 2385, 2394, 2400, 2408, 2414, 2422, 2424, 2427, + 2441, 2447, 2455, 2459, 2463, 2465, 2512, 2514, + 2517, 2519, 2524, 2530, 2536, 2541, 2544, 2548, + 2551, 2554, 2556, 2559, 2562, 2565, 2569, 2574, + 2579, 2583, 2585, 2588, 2590, 2594, 2597, 2600, + 2603, 2606, 2610, 2615, 2619, 2621, 2623, 2626, + 2628, 2632, 2635, 2638, 2646, 2650, 2658, 2674, + 2676, 2681, 2683, 2687, 2698, 2702, 2704, 2707, + 2709, 2712, 2717, 2721, 2727, 2733, 2744, 2749, + 2752, 2755, 2758, 2761, 2763, 2767, 2768, 2771, + 2773, 2803, 2805, 2807, 2810, 2814, 2817, 2821, + 2823, 2825, 2827, 2833, 2836, 2839, 2843, 2845, + 2850, 2855, 2862, 2865, 2869, 2873, 2875, 2878, + 2898, 2900, 2902, 2909, 2913, 2915, 2917, 2919, + 2922, 2926, 2930, 2932, 2936, 2939, 2941, 2946, + 2964, 3003, 3009, 3012, 3014, 3016, 3018, 3021, + 3024, 3027, 3030, 3033, 3037, 3040, 3043, 3046, + 3048, 3050, 3053, 3060, 3063, 3065, 3068, 3071, + 3074, 3082, 3084, 3086, 3089, 3091, 3094, 3096, + 3098, 3128, 3131, 3134, 3137, 3140, 3145, 3149, + 3156, 3159, 3168, 3177, 3180, 3184, 3187, 3190, + 3194, 3196, 3200, 3202, 3205, 3207, 3211, 3215, + 3219, 3227, 3229, 3231, 3235, 3239, 3241, 3254, + 3256, 3259, 3262, 3267, 3269, 3272, 3274, 3276, + 3279, 3284, 3286, 3288, 3293, 3295, 3298, 3302, + 3322, 3326, 3330, 3332, 3334, 3342, 3344, 3351, + 3356, 3358, 3362, 3365, 3368, 3371, 3375, 3378, + 3381, 3385, 3395, 
3401, 3404, 3407, 3417, 3437, + 3443, 3446, 3448, 3452, 3454, 3457, 3459, 3463, + 3465, 3467, 3471, 3473, 3475, 3481, 3484, 3489, + 3494, 3500, 3510, 3518, 3530, 3537, 3547, 3553, + 3565, 3571, 3589, 3592, 3600, 3606, 3616, 3623, + 3630, 3638, 3646, 3649, 3654, 3674, 3680, 3683, + 3687, 3691, 3695, 3707, 3710, 3715, 3716, 3722, + 3729, 3735, 3738, 3741, 3745, 3749, 3752, 3755, + 3760, 3764, 3770, 3776, 3779, 3783, 3786, 3789, + 3794, 3797, 3800, 3806, 3810, 3813, 3817, 3820, + 3823, 3827, 3831, 3838, 3841, 3844, 3850, 3853, + 3860, 3862, 3864, 3867, 3876, 3881, 3895, 3899, + 3903, 3918, 3924, 3927, 3930, 3932, 3937, 3943, + 3947, 3955, 3961, 3971, 3974, 3977, 3982, 3986, + 3989, 3992, 3995, 3999, 4004, 4008, 4012, 4015, + 4020, 4025, 4028, 4034, 4038, 4044, 4049, 4053, + 4057, 4065, 4068, 4076, 4082, 4092, 4103, 4106, + 4109, 4111, 4115, 4117, 4120, 4131, 4135, 4138, + 4141, 4144, 4147, 4149, 4153, 4157, 4160, 4164, + 4169, 4172, 4182, 4184, 4225, 4231, 4235, 4238, + 4241, 4245, 4248, 4252, 4256, 4261, 4263, 4267, + 4271, 4274, 4277, 4282, 4291, 4295, 4300, 4305, + 4309, 4316, 4320, 4323, 4327, 4330, 4335, 4338, + 4341, 4371, 4375, 4379, 4383, 4387, 4392, 4396, + 4402, 4406, 4414, 4417, 4422, 4426, 4429, 4434, + 4437, 4441, 4444, 4447, 4450, 4453, 4456, 4460, + 4464, 4467, 4477, 4480, 4483, 4488, 4494, 4497, + 4512, 4515, 4519, 4525, 4529, 4533, 4536, 4540, + 4547, 4550, 4553, 4559, 4562, 4566, 4571, 4587, + 4589, 4597, 4599, 4607, 4613, 4615, 4619, 4622, + 4625, 4628, 4632, 4643, 4646, 4658, 4682, 4690, + 4692, 4696, 4699, 4704, 4707, 4709, 4714, 4717, + 4723, 4726, 4728, 4730, 4732, 4734, 4736, 4738, + 4740, 4742, 4744, 4746, 4748, 4750, 4752, 4754, + 4756, 4758, 4760, 4762, 4764, 4766, 4831, 4833, + 4835, 4837, 4839, 4841, 4843, 4845, 4848, 4850, + 4855, 4858, 4861, 4863, 4903, 4905, 4907, 4909, + 4914, 4918, 4919, 4921, 4923, 4930, 4937, 4944, + 4946, 4948, 4950, 4953, 4956, 4962, 4965, 4970, + 4977, 4982, 4985, 4989, 4996, 5028, 5077, 5092, + 5105, 5110, 5112, 5116, 5147, 5153, 5155, 5176, + 5196, 5198, 5210, 5221, 5231, 5237, 5247, 5257, + 5268, 5270, 5272, 5274, 5276, 5278, 5280, 5282, + 5292, 5301, 5303, 5305, 5307, 5309, 5311, 5321, + 5330, 5332, 5334, 5336, 5338, +} + +var _zcltok_indicies []int16 = []int16{ + 2, 1, 4, 3, 6, 5, 6, 7, + 5, 9, 11, 11, 10, 8, 12, 12, + 10, 8, 10, 8, 13, 15, 16, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 42, 43, + 44, 45, 46, 14, 14, 17, 17, 41, + 3, 15, 16, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 42, 43, 44, 45, 46, 14, + 14, 17, 17, 41, 3, 47, 48, 14, + 14, 49, 16, 18, 19, 20, 19, 50, + 51, 23, 52, 25, 26, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 40, 42, 66, 44, 67, 68, + 69, 14, 14, 14, 17, 41, 3, 47, + 3, 14, 14, 14, 14, 3, 14, 14, + 14, 3, 14, 3, 14, 3, 14, 3, + 3, 3, 3, 3, 14, 3, 3, 3, + 3, 14, 14, 14, 14, 14, 3, 3, + 14, 3, 3, 14, 3, 14, 3, 3, + 14, 3, 3, 3, 14, 14, 14, 14, + 14, 14, 3, 14, 14, 3, 14, 14, + 3, 3, 3, 3, 3, 3, 14, 14, + 3, 3, 14, 3, 14, 14, 14, 3, + 70, 71, 72, 73, 17, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, + 3, 14, 3, 14, 3, 14, 14, 3, + 14, 14, 3, 3, 3, 14, 3, 3, + 3, 3, 3, 3, 3, 14, 3, 3, + 3, 3, 3, 3, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 3, 3, 3, 3, 3, 3, 3, 3, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 3, 3, 3, 3, 3, 3, 3, + 3, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 
14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 3, 3, 3, + 3, 3, 3, 3, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 3, 3, 3, 3, + 3, 3, 3, 3, 14, 14, 14, 14, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 3, 14, 3, 14, 14, 3, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 3, + 14, 14, 14, 3, 14, 3, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 19, + 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 17, 18, 136, 137, 138, 139, + 140, 17, 19, 17, 3, 14, 3, 14, + 14, 3, 3, 14, 3, 3, 3, 3, + 14, 3, 3, 3, 3, 3, 14, 3, + 3, 3, 3, 3, 14, 14, 14, 14, + 14, 3, 3, 3, 14, 3, 3, 3, + 14, 14, 14, 3, 3, 3, 14, 14, + 3, 3, 3, 14, 14, 14, 3, 3, + 3, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 3, 3, 14, 14, 14, + 3, 3, 14, 14, 14, 14, 3, 14, + 14, 3, 14, 14, 3, 3, 3, 14, + 14, 14, 3, 3, 3, 3, 14, 14, + 14, 14, 14, 3, 3, 3, 3, 14, + 3, 14, 14, 3, 14, 14, 3, 14, + 3, 14, 14, 14, 3, 14, 14, 3, + 3, 3, 14, 3, 3, 3, 3, 3, + 3, 3, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 14, 3, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 17, 150, 151, 152, 153, 154, 3, 14, + 3, 3, 3, 3, 3, 14, 14, 3, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 3, 3, 14, 14, 14, 3, + 3, 14, 3, 3, 14, 14, 14, 14, + 14, 3, 3, 3, 3, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 3, 155, 112, 156, 157, 158, 17, + 159, 160, 19, 17, 3, 14, 14, 14, + 14, 3, 3, 3, 14, 3, 3, 14, + 14, 14, 3, 3, 3, 14, 14, 3, + 122, 3, 19, 17, 17, 161, 3, 17, + 3, 14, 19, 162, 163, 19, 164, 165, + 19, 60, 166, 167, 168, 169, 170, 19, + 171, 172, 173, 19, 174, 175, 176, 18, + 177, 178, 179, 18, 180, 19, 17, 3, + 3, 14, 14, 3, 3, 3, 14, 14, + 14, 14, 3, 14, 14, 3, 3, 3, + 3, 14, 14, 3, 3, 14, 14, 3, + 3, 3, 3, 3, 3, 14, 14, 14, + 3, 3, 3, 14, 3, 3, 3, 14, + 14, 3, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 14, 14, 14, + 14, 3, 181, 182, 3, 17, 3, 14, + 3, 3, 14, 19, 183, 184, 185, 186, + 60, 187, 188, 58, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 17, 3, 3, + 14, 3, 14, 14, 14, 14, 14, 14, + 14, 3, 14, 14, 14, 3, 14, 3, + 3, 14, 3, 14, 3, 3, 14, 14, + 14, 14, 3, 14, 14, 14, 3, 3, + 14, 14, 14, 14, 3, 14, 14, 3, + 3, 14, 14, 14, 14, 14, 3, 198, + 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 204, 209, 210, 211, 212, 41, + 3, 213, 214, 19, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 19, 17, 224, + 225, 226, 227, 19, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 19, 147, 17, 243, 3, + 14, 14, 14, 14, 14, 3, 3, 3, + 14, 3, 14, 14, 3, 14, 3, 14, + 14, 3, 3, 3, 14, 14, 14, 3, + 3, 3, 14, 14, 14, 3, 3, 3, + 3, 14, 3, 3, 14, 3, 3, 14, + 14, 14, 3, 3, 14, 3, 14, 14, + 14, 3, 14, 14, 14, 14, 14, 14, + 3, 3, 3, 14, 14, 3, 14, 14, + 3, 14, 14, 3, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 3, 14, 3, 14, 14, 3, 14, 3, + 14, 14, 3, 14, 3, 14, 3, 244, + 215, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 101, 254, 19, 255, 256, 257, + 19, 258, 132, 259, 260, 261, 262, 263, + 264, 265, 266, 19, 3, 3, 3, 14, + 14, 14, 3, 14, 14, 3, 14, 14, + 3, 3, 3, 3, 3, 14, 14, 14, + 14, 3, 14, 14, 14, 14, 14, 14, + 3, 3, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 3, 3, 3, 3, 14, 14, 14, 3, + 3, 3, 14, 3, 3, 3, 14, 14, + 3, 14, 14, 14, 3, 14, 
3, 3, + 3, 14, 14, 3, 14, 14, 14, 3, + 14, 14, 14, 3, 3, 3, 3, 14, + 19, 184, 267, 268, 17, 19, 17, 3, + 3, 14, 3, 14, 19, 267, 17, 3, + 19, 269, 17, 3, 3, 14, 19, 270, + 271, 272, 175, 273, 274, 19, 275, 276, + 277, 17, 3, 3, 14, 14, 14, 3, + 14, 14, 3, 14, 14, 14, 14, 3, + 3, 14, 3, 3, 14, 14, 3, 14, + 3, 19, 17, 3, 278, 19, 279, 3, + 17, 3, 14, 3, 14, 280, 19, 281, + 282, 3, 14, 3, 3, 3, 14, 14, + 14, 14, 3, 283, 284, 285, 19, 286, + 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 17, 3, 14, + 14, 14, 3, 3, 3, 3, 14, 14, + 3, 3, 14, 3, 3, 3, 3, 3, + 3, 3, 14, 3, 14, 3, 3, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 3, 3, 14, 3, 3, 3, 14, 3, + 3, 14, 3, 3, 14, 3, 3, 14, + 3, 3, 3, 14, 14, 14, 3, 3, + 3, 14, 14, 14, 14, 3, 300, 19, + 301, 19, 302, 303, 304, 305, 17, 3, + 14, 14, 14, 14, 14, 3, 3, 3, + 14, 3, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 3, 14, 14, 14, 14, 14, 3, + 306, 19, 17, 3, 14, 307, 19, 103, + 17, 3, 14, 308, 3, 17, 3, 14, + 19, 309, 17, 3, 3, 14, 310, 3, + 19, 311, 17, 3, 3, 14, 14, 14, + 14, 3, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 3, 3, 14, 3, + 14, 14, 14, 3, 14, 3, 14, 14, + 14, 3, 3, 3, 3, 3, 3, 3, + 14, 14, 14, 3, 14, 3, 3, 3, + 14, 14, 14, 14, 3, 312, 313, 72, + 314, 315, 316, 317, 318, 319, 320, 321, + 322, 323, 324, 325, 326, 327, 328, 329, + 330, 331, 332, 334, 335, 336, 337, 338, + 339, 333, 3, 14, 14, 14, 14, 3, + 14, 3, 14, 14, 3, 14, 14, 14, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 3, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 3, + 14, 14, 14, 14, 14, 3, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 14, + 14, 3, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 3, 14, 3, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 3, 14, 3, 14, 14, + 3, 14, 3, 340, 341, 342, 104, 105, + 106, 107, 108, 343, 110, 111, 112, 113, + 114, 115, 344, 345, 170, 346, 261, 120, + 347, 122, 232, 272, 125, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 134, + 358, 19, 17, 18, 19, 137, 138, 139, + 140, 17, 17, 3, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 3, 3, 3, + 14, 3, 14, 14, 14, 14, 3, 14, + 14, 14, 3, 14, 14, 3, 14, 14, + 14, 3, 3, 14, 14, 14, 3, 3, + 14, 14, 3, 14, 3, 14, 3, 14, + 14, 14, 3, 3, 14, 14, 3, 14, + 14, 3, 14, 14, 14, 3, 359, 143, + 145, 146, 147, 148, 149, 17, 360, 151, + 361, 153, 362, 3, 14, 14, 3, 3, + 3, 3, 14, 3, 3, 14, 14, 14, + 14, 14, 3, 363, 112, 364, 157, 158, + 17, 159, 160, 19, 17, 3, 14, 14, + 14, 14, 3, 3, 3, 14, 19, 162, + 163, 19, 365, 366, 222, 311, 166, 167, + 168, 367, 170, 368, 369, 370, 371, 372, + 373, 374, 375, 376, 377, 178, 179, 18, + 378, 19, 17, 3, 3, 3, 3, 14, + 14, 14, 3, 3, 3, 3, 3, 14, + 14, 3, 14, 14, 14, 3, 14, 14, + 3, 3, 3, 14, 14, 3, 14, 14, + 14, 14, 3, 14, 3, 14, 14, 14, + 14, 14, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 3, 14, 3, + 19, 183, 184, 379, 186, 60, 187, 188, + 58, 189, 190, 380, 17, 193, 381, 195, + 196, 197, 17, 3, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 3, 14, + 3, 382, 383, 200, 201, 202, 384, 204, + 205, 385, 386, 387, 204, 209, 210, 211, + 212, 41, 3, 213, 214, 19, 215, 216, + 218, 388, 220, 389, 222, 223, 19, 17, + 390, 225, 226, 227, 19, 228, 229, 230, + 231, 232, 233, 234, 235, 391, 237, 238, + 392, 240, 241, 242, 19, 147, 17, 243, + 3, 3, 14, 3, 3, 14, 3, 14, + 14, 14, 14, 14, 3, 14, 14, 3, + 
393, 394, 395, 396, 397, 398, 399, 400, + 250, 401, 322, 402, 216, 403, 404, 405, + 406, 407, 404, 408, 409, 410, 261, 411, + 263, 412, 413, 274, 3, 14, 3, 14, + 3, 14, 3, 14, 3, 14, 14, 3, + 14, 3, 14, 14, 14, 3, 14, 14, + 3, 3, 14, 14, 14, 3, 14, 3, + 14, 3, 14, 14, 3, 14, 3, 14, + 3, 14, 3, 14, 3, 14, 3, 3, + 3, 14, 14, 14, 3, 14, 14, 3, + 19, 270, 232, 414, 404, 415, 274, 19, + 416, 417, 277, 17, 3, 14, 3, 14, + 14, 14, 3, 3, 3, 14, 14, 3, + 280, 19, 281, 418, 3, 14, 14, 3, + 19, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 419, 17, 3, 3, 3, + 14, 19, 420, 19, 268, 303, 304, 305, + 17, 3, 3, 14, 422, 422, 422, 422, + 421, 422, 422, 422, 421, 422, 421, 422, + 422, 421, 421, 421, 421, 421, 421, 422, + 421, 421, 421, 421, 422, 422, 422, 422, + 422, 421, 421, 422, 421, 421, 422, 421, + 422, 421, 421, 422, 421, 421, 421, 422, + 422, 422, 422, 422, 422, 421, 422, 422, + 421, 422, 422, 421, 421, 421, 421, 421, + 421, 422, 422, 421, 421, 422, 421, 422, + 422, 422, 421, 423, 424, 425, 426, 427, + 428, 429, 430, 431, 432, 433, 434, 435, + 436, 437, 438, 439, 440, 441, 442, 443, + 444, 445, 446, 447, 448, 449, 450, 451, + 452, 453, 454, 421, 422, 421, 422, 421, + 422, 422, 421, 422, 422, 421, 421, 421, + 422, 421, 421, 421, 421, 421, 421, 421, + 422, 421, 421, 421, 421, 421, 421, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 421, 421, 421, 421, 421, + 421, 421, 421, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 421, 421, 421, 421, + 421, 421, 421, 421, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 421, 422, 422, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 422, 421, 422, 422, 422, 422, 422, 422, + 421, 421, 421, 421, 421, 421, 421, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 422, 421, 422, 422, 422, 422, 422, 421, + 421, 421, 421, 421, 421, 421, 421, 422, + 422, 422, 422, 422, 422, 421, 422, 422, + 422, 422, 422, 422, 422, 421, 422, 421, + 422, 422, 421, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 421, 422, 422, 422, 421, 422, + 421, 455, 456, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 487, 488, 489, 490, 427, 491, 492, + 493, 494, 495, 496, 427, 472, 427, 421, + 422, 421, 422, 422, 421, 421, 422, 421, + 421, 421, 421, 422, 421, 421, 421, 421, + 421, 422, 421, 421, 421, 421, 421, 422, + 422, 422, 422, 422, 421, 421, 421, 422, + 421, 421, 421, 422, 422, 422, 421, 421, + 421, 422, 422, 421, 421, 421, 422, 422, + 422, 421, 421, 421, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 421, 421, + 422, 422, 422, 421, 421, 422, 422, 422, + 422, 421, 422, 422, 421, 422, 422, 421, + 421, 421, 422, 422, 422, 421, 421, 421, + 421, 422, 422, 422, 422, 422, 421, 421, + 421, 421, 422, 421, 422, 422, 421, 422, + 422, 421, 422, 421, 422, 422, 422, 421, + 422, 422, 421, 421, 421, 422, 421, 421, + 421, 421, 421, 421, 421, 422, 422, 422, + 422, 421, 422, 422, 422, 422, 422, 422, + 422, 421, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 427, 506, 507, 508, 509, + 510, 421, 422, 421, 421, 421, 421, 421, + 422, 422, 421, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 421, 422, 422, 422, 421, 421, 422, + 422, 422, 421, 421, 422, 421, 421, 422, + 422, 422, 422, 422, 
421, 421, 421, 421, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 421, 511, 466, 512, + 513, 514, 427, 515, 516, 472, 427, 421, + 422, 422, 422, 422, 421, 421, 421, 422, + 421, 421, 422, 422, 422, 421, 421, 421, + 422, 422, 421, 477, 421, 472, 427, 427, + 517, 421, 427, 421, 422, 472, 518, 519, + 472, 520, 521, 472, 522, 523, 524, 525, + 526, 527, 472, 528, 529, 530, 472, 531, + 532, 533, 491, 534, 535, 536, 491, 537, + 472, 427, 421, 421, 422, 422, 421, 421, + 421, 422, 422, 422, 422, 421, 422, 422, + 421, 421, 421, 421, 422, 422, 421, 421, + 422, 422, 421, 421, 421, 421, 421, 421, + 422, 422, 422, 421, 421, 421, 422, 421, + 421, 421, 422, 422, 421, 422, 422, 422, + 422, 421, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 422, 422, 422, 422, 421, 538, 539, 421, + 427, 421, 422, 421, 421, 422, 472, 540, + 541, 542, 543, 522, 544, 545, 546, 547, + 548, 549, 550, 551, 552, 553, 554, 555, + 427, 421, 421, 422, 421, 422, 422, 422, + 422, 422, 422, 422, 421, 422, 422, 422, + 421, 422, 421, 421, 422, 421, 422, 421, + 421, 422, 422, 422, 422, 421, 422, 422, + 422, 421, 421, 422, 422, 422, 422, 421, + 422, 422, 421, 421, 422, 422, 422, 422, + 422, 421, 556, 557, 558, 559, 560, 561, + 562, 563, 564, 565, 566, 562, 568, 569, + 570, 571, 567, 421, 572, 573, 472, 574, + 575, 576, 577, 578, 579, 580, 581, 582, + 472, 427, 583, 584, 585, 586, 472, 587, + 588, 589, 590, 591, 592, 593, 594, 595, + 596, 597, 598, 599, 600, 601, 472, 503, + 427, 602, 421, 422, 422, 422, 422, 422, + 421, 421, 421, 422, 421, 422, 422, 421, + 422, 421, 422, 422, 421, 421, 421, 422, + 422, 422, 421, 421, 421, 422, 422, 422, + 421, 421, 421, 421, 422, 421, 421, 422, + 421, 421, 422, 422, 422, 421, 421, 422, + 421, 422, 422, 422, 421, 422, 422, 422, + 422, 422, 422, 421, 421, 421, 422, 422, + 421, 422, 422, 421, 422, 422, 421, 422, + 422, 421, 422, 422, 422, 422, 422, 422, + 422, 421, 422, 421, 422, 421, 422, 422, + 421, 422, 421, 422, 422, 421, 422, 421, + 422, 421, 603, 574, 604, 605, 606, 607, + 608, 609, 610, 611, 612, 455, 613, 472, + 614, 615, 616, 472, 617, 487, 618, 619, + 620, 621, 622, 623, 624, 625, 472, 421, + 421, 421, 422, 422, 422, 421, 422, 422, + 421, 422, 422, 421, 421, 421, 421, 421, + 422, 422, 422, 422, 421, 422, 422, 422, + 422, 422, 422, 421, 421, 421, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 421, 421, 421, 421, 422, + 422, 422, 421, 421, 421, 422, 421, 421, + 421, 422, 422, 421, 422, 422, 422, 421, + 422, 421, 421, 421, 422, 422, 421, 422, + 422, 422, 421, 422, 422, 422, 421, 421, + 421, 421, 422, 472, 541, 626, 627, 427, + 472, 427, 421, 421, 422, 421, 422, 472, + 626, 427, 421, 472, 628, 427, 421, 421, + 422, 472, 629, 630, 631, 532, 632, 633, + 472, 634, 635, 636, 427, 421, 421, 422, + 422, 422, 421, 422, 422, 421, 422, 422, + 422, 422, 421, 421, 422, 421, 421, 422, + 422, 421, 422, 421, 472, 427, 421, 637, + 472, 638, 421, 427, 421, 422, 421, 422, + 639, 472, 640, 641, 421, 422, 421, 421, + 421, 422, 422, 422, 422, 421, 642, 643, + 644, 472, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 427, 421, 422, 422, 422, 421, 421, 421, + 421, 422, 422, 421, 421, 422, 421, 421, + 421, 421, 421, 421, 421, 422, 421, 422, + 421, 421, 421, 421, 421, 421, 422, 422, + 422, 422, 422, 421, 421, 422, 421, 421, + 421, 422, 421, 421, 422, 421, 421, 422, + 421, 421, 422, 421, 421, 421, 422, 422, + 422, 421, 421, 421, 422, 422, 422, 422, + 421, 659, 472, 660, 472, 661, 662, 663, + 664, 
427, 421, 422, 422, 422, 422, 422, + 421, 421, 421, 422, 421, 421, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 421, 422, 422, 422, + 422, 422, 421, 665, 472, 427, 421, 422, + 666, 472, 457, 427, 421, 422, 667, 421, + 427, 421, 422, 472, 668, 427, 421, 421, + 422, 669, 421, 472, 670, 427, 421, 421, + 422, 672, 671, 422, 422, 422, 422, 672, + 671, 422, 672, 671, 672, 672, 422, 672, + 671, 422, 672, 422, 672, 671, 422, 672, + 422, 672, 422, 671, 672, 672, 672, 672, + 672, 672, 672, 672, 671, 422, 422, 672, + 672, 422, 672, 422, 672, 671, 672, 672, + 672, 672, 672, 422, 672, 422, 672, 422, + 672, 671, 672, 672, 422, 672, 422, 672, + 671, 672, 672, 672, 672, 672, 422, 672, + 422, 672, 671, 422, 422, 672, 422, 672, + 671, 672, 672, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 671, 672, 422, 672, + 422, 672, 671, 422, 672, 672, 672, 672, + 422, 672, 422, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 671, 422, 672, 671, + 672, 672, 672, 422, 672, 422, 672, 671, + 672, 422, 672, 422, 672, 671, 422, 672, + 672, 672, 672, 422, 672, 422, 672, 671, + 422, 672, 422, 672, 422, 672, 671, 672, + 672, 422, 672, 422, 672, 671, 422, 672, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 422, 672, 422, 672, 671, 422, 672, + 671, 672, 672, 422, 672, 671, 672, 672, + 672, 422, 672, 672, 672, 672, 672, 672, + 422, 422, 672, 422, 672, 422, 672, 422, + 672, 671, 672, 422, 672, 422, 672, 671, + 422, 672, 671, 672, 422, 672, 671, 672, + 422, 672, 671, 422, 422, 672, 671, 422, + 672, 422, 672, 422, 672, 422, 672, 422, + 672, 422, 671, 672, 672, 422, 672, 672, + 672, 672, 422, 422, 672, 672, 672, 672, + 672, 422, 672, 672, 672, 672, 672, 671, + 422, 672, 672, 422, 672, 422, 671, 672, + 672, 422, 672, 671, 422, 422, 672, 422, + 671, 672, 672, 671, 422, 672, 422, 671, + 672, 671, 422, 672, 422, 672, 422, 671, + 672, 672, 671, 422, 672, 422, 672, 422, + 672, 671, 672, 422, 672, 422, 672, 671, + 422, 672, 671, 422, 422, 672, 671, 672, + 422, 671, 672, 671, 422, 672, 422, 672, + 422, 671, 672, 671, 422, 422, 672, 671, + 672, 422, 672, 422, 672, 671, 422, 672, + 422, 671, 672, 671, 422, 422, 672, 422, + 671, 672, 671, 422, 422, 672, 671, 672, + 422, 672, 671, 672, 422, 672, 671, 672, + 422, 672, 422, 672, 422, 671, 672, 671, + 422, 422, 672, 671, 672, 422, 672, 422, + 672, 671, 422, 672, 671, 672, 672, 422, + 672, 422, 672, 671, 671, 422, 671, 422, + 672, 672, 422, 672, 672, 672, 672, 672, + 672, 672, 671, 422, 672, 672, 672, 422, + 671, 672, 672, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 671, 422, + 422, 672, 671, 672, 422, 672, 671, 422, + 422, 672, 422, 422, 422, 672, 422, 672, + 422, 672, 422, 672, 422, 671, 422, 672, + 422, 672, 422, 671, 672, 671, 422, 672, + 422, 671, 672, 422, 672, 672, 672, 671, + 422, 672, 422, 422, 672, 422, 671, 672, + 672, 671, 422, 672, 672, 672, 672, 422, + 672, 422, 671, 672, 672, 672, 422, 672, + 671, 672, 422, 672, 422, 672, 422, 672, + 422, 672, 671, 672, 672, 422, 672, 671, + 422, 672, 422, 672, 422, 671, 672, 672, + 671, 422, 672, 422, 671, 672, 671, 422, + 672, 671, 422, 672, 422, 672, 671, 672, + 672, 672, 671, 422, 422, 422, 672, 671, + 422, 672, 422, 671, 672, 671, 422, 672, + 422, 672, 422, 671, 672, 672, 672, 671, + 422, 672, 422, 671, 672, 672, 672, 672, + 671, 422, 672, 422, 672, 671, 422, 422, + 672, 422, 672, 671, 672, 422, 672, 422, + 671, 672, 672, 671, 422, 672, 422, 672, + 671, 422, 672, 672, 672, 422, 672, 422, + 671, 422, 672, 671, 672, 422, 
422, 672, + 422, 672, 422, 671, 672, 672, 672, 672, + 671, 422, 672, 422, 672, 422, 672, 422, + 672, 422, 672, 671, 672, 672, 672, 422, + 672, 422, 672, 422, 672, 422, 671, 672, + 672, 422, 422, 672, 671, 672, 422, 672, + 672, 671, 422, 672, 422, 672, 671, 422, + 422, 672, 672, 672, 672, 422, 672, 422, + 672, 422, 671, 672, 672, 422, 671, 672, + 671, 422, 672, 422, 671, 672, 671, 422, + 672, 422, 671, 672, 422, 672, 672, 671, + 422, 672, 672, 422, 671, 672, 671, 422, + 672, 422, 672, 671, 672, 422, 672, 422, + 671, 672, 671, 422, 672, 422, 672, 422, + 672, 422, 672, 422, 672, 671, 673, 671, + 674, 675, 676, 677, 678, 679, 680, 681, + 682, 683, 684, 676, 685, 686, 687, 688, + 689, 676, 690, 691, 692, 693, 694, 695, + 696, 697, 698, 699, 700, 701, 702, 703, + 704, 676, 705, 673, 685, 673, 706, 673, + 671, 672, 672, 672, 672, 422, 671, 672, + 672, 671, 422, 672, 671, 422, 422, 672, + 671, 422, 672, 422, 671, 672, 671, 422, + 422, 672, 422, 671, 672, 672, 671, 422, + 672, 672, 672, 671, 422, 672, 422, 672, + 672, 671, 422, 422, 672, 422, 671, 672, + 671, 422, 672, 671, 422, 422, 672, 422, + 672, 671, 422, 672, 422, 422, 672, 422, + 672, 422, 671, 672, 672, 671, 422, 672, + 672, 422, 672, 671, 422, 672, 422, 672, + 671, 422, 672, 422, 671, 422, 672, 672, + 672, 422, 672, 671, 672, 422, 672, 671, + 422, 672, 671, 672, 422, 672, 671, 422, + 672, 671, 422, 672, 422, 672, 671, 422, + 672, 671, 422, 672, 671, 707, 708, 709, + 710, 711, 712, 713, 714, 715, 716, 717, + 718, 678, 719, 720, 721, 722, 723, 720, + 724, 725, 726, 727, 728, 729, 730, 731, + 732, 673, 671, 672, 422, 672, 671, 672, + 422, 672, 671, 672, 422, 672, 671, 672, + 422, 672, 671, 422, 672, 422, 672, 671, + 672, 422, 672, 671, 672, 422, 422, 422, + 672, 671, 672, 422, 672, 671, 672, 672, + 672, 672, 422, 672, 422, 671, 672, 671, + 422, 422, 672, 422, 672, 671, 672, 422, + 672, 671, 422, 672, 671, 672, 672, 422, + 672, 671, 422, 672, 671, 672, 422, 672, + 671, 422, 672, 671, 422, 672, 671, 422, + 672, 671, 672, 671, 422, 422, 672, 671, + 672, 422, 672, 671, 422, 672, 422, 671, + 672, 671, 422, 676, 733, 673, 676, 734, + 676, 735, 685, 673, 671, 672, 671, 422, + 672, 671, 422, 676, 734, 685, 673, 671, + 676, 736, 673, 685, 673, 671, 672, 671, + 422, 676, 737, 694, 738, 720, 739, 732, + 676, 740, 741, 742, 673, 685, 673, 671, + 672, 671, 422, 672, 422, 672, 671, 422, + 672, 422, 672, 422, 671, 672, 672, 671, + 422, 672, 422, 672, 671, 422, 672, 671, + 676, 685, 427, 671, 743, 676, 744, 685, + 673, 671, 427, 672, 671, 422, 672, 671, + 422, 745, 676, 746, 747, 673, 671, 422, + 672, 671, 672, 672, 671, 422, 422, 672, + 422, 672, 671, 676, 748, 749, 750, 751, + 752, 753, 754, 755, 756, 757, 758, 673, + 685, 673, 671, 672, 422, 672, 672, 672, + 672, 672, 672, 672, 422, 672, 422, 672, + 672, 672, 672, 672, 672, 671, 422, 672, + 672, 422, 672, 422, 671, 672, 422, 672, + 672, 672, 422, 672, 672, 422, 672, 672, + 422, 672, 672, 422, 672, 672, 671, 422, + 676, 759, 676, 735, 760, 761, 762, 673, + 685, 673, 671, 672, 671, 422, 672, 672, + 672, 422, 672, 672, 672, 422, 672, 422, + 672, 671, 422, 422, 422, 422, 672, 672, + 422, 422, 422, 422, 422, 672, 672, 672, + 672, 672, 672, 672, 422, 672, 422, 672, + 422, 671, 672, 672, 672, 422, 672, 422, + 672, 671, 685, 427, 763, 676, 685, 427, + 672, 671, 422, 764, 676, 765, 685, 427, + 672, 671, 422, 672, 422, 766, 685, 673, + 671, 427, 672, 671, 422, 676, 767, 673, + 685, 673, 671, 672, 671, 422, 768, 769, + 769, 768, 770, 768, 771, 768, 769, 772, + 773, 772, 775, 774, 776, 777, 777, 774, + 778, 774, 779, 
776, 780, 777, 781, 777, + 783, 782, 784, 785, 785, 782, 786, 782, + 787, 784, 788, 785, 789, 785, 790, 791, + 792, 793, 794, 795, 796, 797, 799, 800, + 801, 802, 803, 672, 672, 672, 804, 805, + 806, 807, 672, 810, 811, 813, 814, 815, + 809, 816, 817, 818, 819, 820, 821, 822, + 823, 824, 825, 826, 827, 828, 829, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 840, 841, 842, 672, 798, 10, 798, 422, + 798, 422, 809, 812, 836, 843, 808, 790, + 844, 791, 845, 793, 846, 848, 847, 2, + 1, 849, 847, 850, 847, 5, 1, 847, + 6, 5, 9, 11, 11, 10, 852, 853, + 854, 847, 855, 856, 847, 857, 847, 422, + 422, 859, 860, 491, 472, 861, 472, 862, + 863, 864, 865, 866, 867, 868, 869, 870, + 871, 872, 546, 873, 522, 874, 875, 876, + 877, 878, 879, 880, 881, 882, 883, 884, + 885, 422, 422, 422, 427, 567, 858, 886, + 847, 887, 847, 672, 888, 422, 422, 422, + 672, 888, 672, 672, 422, 888, 422, 888, + 422, 888, 422, 672, 672, 672, 672, 672, + 888, 422, 672, 672, 672, 422, 672, 422, + 888, 422, 672, 672, 672, 672, 422, 888, + 672, 422, 672, 422, 672, 422, 672, 672, + 422, 672, 888, 422, 672, 422, 672, 422, + 672, 888, 672, 422, 888, 672, 422, 672, + 422, 888, 672, 672, 672, 672, 672, 888, + 422, 422, 672, 422, 672, 888, 672, 422, + 888, 672, 672, 888, 422, 422, 672, 422, + 672, 422, 672, 888, 889, 890, 891, 892, + 893, 894, 895, 896, 897, 898, 899, 717, + 900, 901, 902, 903, 904, 905, 906, 907, + 908, 909, 910, 911, 910, 912, 913, 914, + 915, 916, 673, 888, 917, 918, 919, 920, + 921, 922, 923, 924, 925, 926, 927, 928, + 929, 930, 931, 932, 933, 934, 935, 727, + 936, 937, 938, 694, 939, 940, 941, 942, + 943, 944, 673, 945, 946, 947, 948, 949, + 950, 951, 952, 676, 953, 673, 676, 954, + 955, 956, 957, 685, 888, 958, 959, 960, + 961, 705, 962, 963, 685, 964, 965, 966, + 967, 968, 673, 888, 969, 928, 970, 971, + 972, 685, 973, 974, 676, 673, 685, 427, + 888, 938, 673, 676, 685, 427, 685, 427, + 975, 685, 888, 427, 676, 976, 977, 676, + 978, 979, 683, 980, 981, 982, 983, 984, + 934, 985, 986, 987, 988, 989, 990, 991, + 992, 993, 994, 995, 996, 953, 997, 676, + 685, 427, 888, 998, 999, 685, 673, 888, + 427, 673, 888, 676, 1000, 733, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 1008, 673, 1009, + 1010, 1011, 1012, 1013, 1014, 673, 685, 888, + 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, + 1024, 1025, 1026, 1022, 1028, 1029, 1030, 1031, + 1015, 1027, 1015, 888, 1015, 888, 1032, 1032, + 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1037, 769, 1041, 1041, 1041, 1036, 1042, 1041, + 770, 771, 1043, 1041, 769, 1041, 1041, 1036, + 1044, 1041, 770, 771, 1043, 1041, 769, 1036, + 1044, 1045, 1046, 1047, 769, 1041, 1041, 1041, + 1036, 1042, 770, 771, 1043, 1041, 769, 1041, + 1041, 1041, 1036, 1042, 770, 771, 1043, 1041, + 769, 1041, 1041, 1041, 1036, 1042, 771, 770, + 771, 1043, 1041, 769, 1049, 769, 1051, 1050, + 1052, 769, 1054, 1053, 769, 1055, 773, 1055, + 1056, 1055, 775, 1057, 1058, 1059, 1060, 1061, + 1062, 1063, 1060, 777, 775, 1057, 1065, 1064, + 778, 779, 1066, 1064, 777, 1068, 1067, 1070, + 1069, 777, 1071, 778, 1071, 779, 1071, 783, + 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1075, + 785, 783, 1072, 1080, 1079, 786, 787, 1081, + 1079, 785, 1083, 1082, 1085, 1084, 785, 1086, + 786, 1086, 787, 1086, +} + +var _zcltok_trans_targs []int16 = []int16{ + 949, 1, 949, 949, 949, 3, 4, 958, + 949, 5, 959, 6, 7, 9, 10, 287, + 13, 14, 15, 16, 17, 288, 289, 20, + 290, 22, 23, 291, 292, 293, 294, 295, + 296, 297, 298, 299, 300, 329, 349, 354, + 128, 129, 130, 357, 152, 372, 376, 949, + 11, 12, 18, 19, 21, 24, 25, 26, + 27, 28, 
29, 30, 31, 32, 33, 65, + 106, 121, 132, 155, 171, 284, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 122, 123, 124, 125, 126, + 127, 131, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 153, 154, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 172, 204, + 228, 231, 232, 234, 243, 244, 247, 251, + 269, 276, 278, 280, 282, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 229, 230, 233, 235, 236, + 237, 238, 239, 240, 241, 242, 245, 246, + 248, 249, 250, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 270, 271, 272, 273, + 274, 275, 277, 279, 281, 283, 285, 286, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 350, + 351, 352, 353, 355, 356, 358, 359, 360, + 361, 362, 363, 364, 365, 366, 367, 368, + 369, 370, 371, 373, 374, 375, 377, 383, + 405, 410, 412, 414, 378, 379, 380, 381, + 382, 384, 385, 386, 387, 388, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, + 399, 400, 401, 402, 403, 404, 406, 407, + 408, 409, 411, 413, 415, 949, 963, 438, + 439, 440, 441, 418, 442, 443, 444, 445, + 446, 447, 448, 449, 450, 451, 452, 453, + 454, 455, 456, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 470, + 471, 472, 473, 474, 475, 476, 477, 478, + 479, 480, 481, 482, 483, 484, 485, 486, + 420, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 504, 419, 505, 506, 507, 508, + 509, 511, 512, 513, 514, 515, 516, 517, + 518, 519, 520, 521, 522, 523, 524, 526, + 527, 528, 529, 530, 531, 535, 537, 538, + 539, 540, 435, 541, 542, 543, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 554, 555, 557, 558, 560, 561, 562, 563, + 564, 565, 433, 566, 567, 568, 569, 570, + 571, 572, 573, 574, 576, 608, 632, 635, + 636, 638, 647, 648, 651, 655, 673, 533, + 680, 682, 684, 686, 577, 578, 579, 580, + 581, 582, 583, 584, 585, 586, 587, 588, + 589, 590, 591, 592, 593, 594, 595, 596, + 597, 598, 599, 600, 601, 602, 603, 604, + 605, 606, 607, 609, 610, 611, 612, 613, + 614, 615, 616, 617, 618, 619, 620, 621, + 622, 623, 624, 625, 626, 627, 628, 629, + 630, 631, 633, 634, 637, 639, 640, 641, + 642, 643, 644, 645, 646, 649, 650, 652, + 653, 654, 656, 657, 658, 659, 660, 661, + 662, 663, 664, 665, 666, 667, 668, 669, + 670, 671, 672, 674, 675, 676, 677, 678, + 679, 681, 683, 685, 687, 689, 690, 949, + 949, 691, 828, 829, 760, 830, 831, 832, + 833, 834, 835, 789, 836, 725, 837, 838, + 839, 840, 841, 842, 843, 844, 745, 845, + 846, 847, 848, 849, 850, 851, 852, 853, + 854, 770, 855, 857, 858, 859, 860, 861, + 862, 863, 864, 865, 866, 703, 867, 868, + 869, 870, 871, 872, 873, 874, 875, 741, + 876, 877, 878, 879, 880, 811, 882, 883, + 886, 888, 889, 890, 891, 892, 893, 896, + 897, 899, 900, 901, 
903, 904, 905, 906, + 907, 908, 909, 910, 911, 912, 913, 915, + 916, 917, 918, 921, 923, 924, 926, 928, + 1001, 1002, 930, 931, 1001, 933, 1015, 1015, + 1015, 1016, 937, 938, 1017, 1018, 1022, 1022, + 1022, 1023, 944, 945, 1024, 1025, 950, 949, + 951, 952, 953, 949, 954, 955, 949, 956, + 957, 960, 961, 962, 949, 964, 949, 965, + 949, 966, 967, 968, 969, 970, 971, 972, + 973, 974, 975, 976, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 949, 949, 949, 949, + 949, 949, 2, 949, 949, 8, 949, 949, + 949, 949, 949, 416, 417, 421, 422, 423, + 424, 425, 426, 427, 428, 429, 430, 431, + 432, 434, 436, 437, 469, 510, 525, 532, + 534, 536, 556, 559, 575, 688, 949, 949, + 949, 692, 693, 694, 695, 696, 697, 698, + 699, 700, 701, 702, 704, 705, 706, 707, + 708, 709, 710, 711, 712, 713, 714, 715, + 716, 717, 718, 719, 720, 721, 722, 723, + 724, 726, 727, 728, 729, 730, 731, 732, + 733, 734, 735, 736, 737, 738, 739, 740, + 742, 743, 744, 746, 747, 748, 749, 750, + 751, 752, 753, 754, 755, 756, 757, 758, + 759, 761, 762, 763, 764, 765, 766, 767, + 768, 769, 771, 772, 773, 774, 775, 776, + 777, 778, 779, 780, 781, 782, 783, 784, + 785, 786, 787, 788, 790, 791, 792, 793, + 794, 795, 796, 797, 798, 799, 800, 801, + 802, 803, 804, 805, 806, 807, 808, 809, + 810, 812, 813, 814, 815, 816, 817, 818, + 819, 820, 821, 822, 823, 824, 825, 826, + 827, 856, 881, 884, 885, 887, 894, 895, + 898, 902, 914, 919, 920, 922, 925, 927, + 1001, 1001, 1008, 1010, 1003, 1001, 1012, 1013, + 1014, 1001, 929, 932, 1004, 1005, 1006, 1007, + 1001, 1009, 1001, 1001, 1011, 1001, 1001, 1001, + 934, 935, 940, 941, 1015, 1019, 1020, 1021, + 1015, 936, 939, 1015, 1015, 1015, 1015, 1015, + 942, 947, 948, 1022, 1026, 1027, 1028, 1022, + 943, 946, 1022, 1022, 1022, 1022, 1022, +} + +var _zcltok_trans_actions []byte = []byte{ + 131, 0, 71, 127, 87, 0, 0, 151, + 123, 0, 5, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 101, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 125, 148, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 129, + 105, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 27, 5, 0, 0, 29, 0, 49, 35, + 47, 136, 0, 0, 0, 0, 69, 55, + 67, 142, 0, 0, 0, 0, 0, 73, + 0, 0, 0, 99, 160, 0, 91, 5, + 154, 5, 0, 0, 93, 0, 95, 0, + 103, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 5, + 5, 5, 157, 157, 157, 157, 157, 157, + 5, 5, 157, 5, 117, 121, 107, 115, + 77, 83, 0, 113, 109, 0, 81, 75, + 89, 79, 111, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 85, 97, + 119, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 13, 11, 0, 0, 5, 15, 0, 5, + 5, 21, 0, 0, 0, 5, 5, 5, + 23, 0, 17, 7, 0, 19, 9, 25, + 0, 0, 0, 0, 37, 0, 139, 139, + 43, 0, 0, 39, 31, 41, 33, 45, + 0, 0, 0, 57, 0, 145, 145, 63, + 0, 0, 59, 51, 61, 53, 65, +} + +var _zcltok_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 133, + 0, 0, 0, 0, 0, 0, 133, 0, + 0, 0, 0, 0, 0, +} + +var _zcltok_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, +} + +var _zcltok_eof_trans []int16 = []int16{ + 0, 1, 4, 1, 1, 9, 9, 9, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 422, 422, 1, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 
422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 769, 769, 769, 769, 773, 773, 775, + 777, 775, 775, 777, 0, 0, 783, 785, + 783, 783, 785, 0, 0, 0, 845, 846, + 847, 848, 846, 848, 848, 848, 852, 853, + 848, 848, 848, 859, 848, 848, 889, 889, + 889, 889, 889, 889, 889, 889, 889, 889, + 889, 889, 889, 889, 889, 889, 889, 889, + 889, 889, 889, 889, 889, 889, 889, 889, + 889, 889, 889, 889, 889, 889, 889, 889, + 889, 0, 1042, 1042, 1042, 1042, 1042, 1042, + 1049, 1051, 1049, 1054, 1056, 1056, 1056, 0, + 1065, 1068, 1070, 1072, 1072, 1072, 0, 1080, + 1083, 1085, 1087, 1087, 1087, +} + +const zcltok_start int = 949 +const zcltok_first_final int = 949 +const zcltok_error int = 0 + +const zcltok_en_stringTemplate int = 1001 +const zcltok_en_heredocTemplate int = 1015 +const zcltok_en_bareTemplate int = 1022 +const zcltok_en_main int = 949 + +// line 15 "scan_tokens.rl" + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + } + + // line 272 "scan_tokens.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = zcltok_en_main + case scanTemplate: + cs = zcltok_en_bareTemplate + default: + 
panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + // line 305 "scan_tokens.rl" + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func(ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func() { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + // line 2816 "scan_tokens.go" + { + top = 0 + ts = 0 + te = 0 + act = 0 + } + + // line 2824 "scan_tokens.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _acts = int(_zcltok_from_state_actions[cs]) + _nacts = uint(_zcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _zcltok_actions[_acts-1] { + case 2: + // line 1 "NONE" + + ts = p + + // line 2848 "scan_tokens.go" + } + } + + _keys = int(_zcltok_key_offsets[cs]) + _trans = int(_zcltok_index_offsets[cs]) + + _klen = int(_zcltok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _zcltok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _zcltok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_zcltok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _zcltok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _zcltok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_zcltok_indicies[_trans]) + _eof_trans: + cs = int(_zcltok_trans_targs[_trans]) + + if _zcltok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_zcltok_trans_actions[_trans]) + _nacts = uint(_zcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _zcltok_actions[_acts-1] { + case 3: + // line 1 "NONE" + + te = p + 1 + + case 4: + // line 138 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 5: + // line 148 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 6: + // line 80 "scan_tokens.rl" + + te = p + 1 + { + token(TokenCQuote) + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + case 7: + // line 222 "scan_tokens.rl" + + te = p + 1 + { + token(TokenInvalid) + } + case 8: + // line 223 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 9: + // line 138 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + 
braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 10: + // line 148 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 11: + // line 221 "scan_tokens.rl" + + te = p + p-- + { + token(TokenQuotedLit) + } + case 12: + // line 222 "scan_tokens.rl" + + te = p + p-- + { + token(TokenInvalid) + } + case 13: + // line 223 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 14: + // line 221 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenQuotedLit) + } + case 15: + // line 223 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 16: + // line 126 "scan_tokens.rl" + + act = 10 + case 17: + // line 231 "scan_tokens.rl" + + act = 11 + case 18: + // line 138 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 19: + // line 148 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 20: + // line 107 "scan_tokens.rl" + + te = p + 1 + { + // This action is called specificially when a heredoc literal + // ends with a newline character. + + // This might actually be our end marker. + topdoc := &heredocs[len(heredocs)-1] + if topdoc.StartOfLine { + maybeMarker := bytes.TrimSpace(data[ts:te]) + if bytes.Equal(maybeMarker, topdoc.Marker) { + token(TokenCHeredoc) + heredocs = heredocs[:len(heredocs)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + } + + topdoc.StartOfLine = true + token(TokenStringLit) + } + case 21: + // line 231 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 22: + // line 138 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 23: + // line 148 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 24: + // line 126 "scan_tokens.rl" + + te = p + p-- + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 25: + // line 231 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 26: + // line 126 "scan_tokens.rl" + + p = (te) - 1 + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. 
because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 27: + // line 1 "NONE" + + switch act { + case 0: + { + cs = 0 + goto _again + } + case 10: + { + p = (te) - 1 + + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 11: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 28: + // line 134 "scan_tokens.rl" + + act = 14 + case 29: + // line 238 "scan_tokens.rl" + + act = 15 + case 30: + // line 138 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 31: + // line 148 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 32: + // line 134 "scan_tokens.rl" + + te = p + 1 + { + token(TokenStringLit) + } + case 33: + // line 238 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 34: + // line 138 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 35: + // line 148 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 949 + goto _again + } + } + case 36: + // line 134 "scan_tokens.rl" + + te = p + p-- + { + token(TokenStringLit) + } + case 37: + // line 238 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 38: + // line 134 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenStringLit) + } + case 39: + // line 1 "NONE" + + switch act { + case 0: + { + cs = 0 + goto _again + } + case 14: + { + p = (te) - 1 + + token(TokenStringLit) + } + case 15: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 40: + // line 244 "scan_tokens.rl" + + act = 18 + case 41: + // line 246 "scan_tokens.rl" + + act = 19 + case 42: + // line 257 "scan_tokens.rl" + + act = 29 + case 43: + // line 268 "scan_tokens.rl" + + act = 36 + case 44: + // line 269 "scan_tokens.rl" + + act = 37 + case 45: + // line 246 "scan_tokens.rl" + + te = p + 1 + { + token(TokenComment) + } + case 46: + // line 247 "scan_tokens.rl" + + te = p + 1 + { + token(TokenNewline) + } + case 47: + // line 249 "scan_tokens.rl" + + te = p + 1 + { + token(TokenEqualOp) + } + case 48: + // line 250 "scan_tokens.rl" + + te = p + 1 + { + token(TokenNotEqual) + } + case 49: + // line 251 "scan_tokens.rl" + + te = p + 1 + { + token(TokenGreaterThanEq) + } + case 50: + // line 252 "scan_tokens.rl" + + te = p + 1 + { + token(TokenLessThanEq) + } + case 51: + // line 253 "scan_tokens.rl" + + te = p + 1 + { + token(TokenAnd) + } + case 52: + // line 254 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOr) + } + case 53: + // line 
255 "scan_tokens.rl" + + te = p + 1 + { + token(TokenEllipsis) + } + case 54: + // line 256 "scan_tokens.rl" + + te = p + 1 + { + token(TokenFatArrow) + } + case 55: + // line 257 "scan_tokens.rl" + + te = p + 1 + { + selfToken() + } + case 56: + // line 158 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOBrace) + braces++ + } + case 57: + // line 163 "scan_tokens.rl" + + te = p + 1 + { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + token(TokenCBrace) + braces-- + } + } + case 58: + // line 175 "scan_tokens.rl" + + te = p + 1 + { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd) + braces-- + } + } + case 59: + // line 75 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOQuote) + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1001 + goto _again + } + } + case 60: + // line 85 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOHeredoc) + // the token is currently the whole heredoc introducer, like + // < 0; _nacts-- { + _acts++ + switch _zcltok_actions[_acts-1] { + case 0: + // line 1 "NONE" + + ts = 0 + + case 1: + // line 1 "NONE" + + act = 0 + + // line 3612 "scan_tokens.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _zcltok_eof_trans[cs] > 0 { + _trans = int(_zcltok_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + + // line 328 "scan_tokens.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < zcltok_first_final { + f.emitToken(TokenInvalid, p, len(data)) + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. + f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl new file mode 100644 index 00000000000..4a395c1336b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl @@ -0,0 +1,342 @@ +package zclsyntax + +import ( + "bytes" + + "github.com/zclconf/go-zcl/zcl" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_tokens.rl here, so edit away!) 
+
+  machine zcltok;
+  write data;
+}%%
+
+func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
+    f := &tokenAccum{
+        Filename: filename,
+        Bytes:    data,
+        Pos:      start,
+    }
+
+    %%{
+        include UnicodeDerived "unicode_derived.rl";
+
+        UTF8Cont = 0x80 .. 0xBF;
+        AnyUTF8 = (
+            0x00..0x7F |
+            0xC0..0xDF . UTF8Cont |
+            0xE0..0xEF . UTF8Cont . UTF8Cont |
+            0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
+        );
+        BrokenUTF8 = any - AnyUTF8;
+
+        NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit);
+        NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.')));
+        Ident = ID_Start (ID_Continue | '-')*;
+
+        # Symbols that just represent themselves are handled as a single rule.
+        SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`";
+
+        EqualOp = "==";
+        NotEqual = "!=";
+        GreaterThanEqual = ">=";
+        LessThanEqual = "<=";
+        LogicalAnd = "&&";
+        LogicalOr = "||";
+
+        Ellipsis = "...";
+        FatArrow = "=>";
+
+        Newline = '\r' ? '\n';
+        EndOfLine = Newline;
+
+        BeginStringTmpl = '"';
+        BeginHeredocTmpl = '<<' ('-')? Ident Newline;
+
+        Comment = (
+            ("#" (any - EndOfLine)* EndOfLine) |
+            ("//" (any - EndOfLine)* EndOfLine) |
+            ("/*" any* "*/")
+        );
+
+        # Tabs are not valid, but we accept them in the scanner and mark them
+        # as tokens so that we can produce diagnostics advising the user to
+        # use spaces instead.
+        Tabs = 0x09+;
+
+        # Note: zclwrite assumes that only ASCII spaces appear between tokens,
+        # and uses this assumption to recreate the spaces between tokens by
+        # looking at byte offset differences.
+        Spaces = ' '+;
+
+        action beginStringTemplate {
+            token(TokenOQuote);
+            fcall stringTemplate;
+        }
+
+        action endStringTemplate {
+            token(TokenCQuote);
+            fret;
+        }
+
+        action beginHeredocTemplate {
+            token(TokenOHeredoc);
+            // the token is currently the whole heredoc introducer, like
+            // <<EOT or <<-EOT, followed by a newline. We want to extract
+            // just the "EOT" portion that we'll use as the closing marker.
+            marker := data[ts+2:te-1]
+            if marker[0] == '-' {
+                marker = marker[1:]
+            }
+            if marker[len(marker)-1] == '\r' {
+                marker = marker[:len(marker)-1]
+            }
+
+            heredocs = append(heredocs, heredocInProgress{
+                Marker:      marker,
+                StartOfLine: true,
+            })
+
+            fcall heredocTemplate;
+        }
+
+        action heredocLiteralEOL {
+            // This action is called specifically when a heredoc literal
+            // ends with a newline character.
+
+            // This might actually be our end marker.
+            topdoc := &heredocs[len(heredocs)-1]
+            if topdoc.StartOfLine {
+                maybeMarker := bytes.TrimSpace(data[ts:te])
+                if bytes.Equal(maybeMarker, topdoc.Marker) {
+                    token(TokenCHeredoc);
+                    heredocs = heredocs[:len(heredocs)-1]
+                    fret;
+                }
+            }
+
+            topdoc.StartOfLine = true;
+            token(TokenStringLit);
+        }
+
+        action heredocLiteralMidline {
+            // This action is called when a heredoc literal _doesn't_ end
+            // with a newline character, e.g. because we're about to enter
+            // an interpolation sequence.
+            heredocs[len(heredocs)-1].StartOfLine = false;
+            token(TokenStringLit);
+        }
+
+        action bareTemplateLiteral {
+            token(TokenStringLit);
+        }
+
+        action beginTemplateInterp {
+            token(TokenTemplateInterp);
+            braces++;
+            retBraces = append(retBraces, braces);
+            if len(heredocs) > 0 {
+                heredocs[len(heredocs)-1].StartOfLine = false;
+            }
+            fcall main;
+        }
+
+        action beginTemplateControl {
+            token(TokenTemplateControl);
+            braces++;
+            retBraces = append(retBraces, braces);
+            if len(heredocs) > 0 {
+                heredocs[len(heredocs)-1].StartOfLine = false;
+            }
+            fcall main;
+        }
+
+        action openBrace {
+            token(TokenOBrace);
+            braces++;
+        }
+
+        action closeBrace {
+            if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
+                token(TokenTemplateSeqEnd);
+                braces--;
+                retBraces = retBraces[0:len(retBraces)-1]
+                fret;
+            } else {
+                token(TokenCBrace);
+                braces--;
+            }
+        }
+
+        action closeTemplateSeqEatWhitespace {
+            // Only consume from the retBraces stack and return if we are at
+            // a suitable brace nesting level, otherwise things will get
+            // confused. (Not entering this branch indicates a syntax error,
+            // which we will catch in the parser.)
+            if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
+                token(TokenTemplateSeqEnd);
+                braces--;
+                retBraces = retBraces[0:len(retBraces)-1]
+                fret;
+            } else {
+                // We intentionally generate a TokenTemplateSeqEnd here,
+                // even though the user apparently wanted a brace, because
+                // we want to allow the parser to catch the incorrect use
+                // of a ~} to balance a generic opening brace, rather than
+                // a template sequence.
+ token(TokenTemplateSeqEnd); + braces--; + } + } + + TemplateInterp = "${" ("~")?; + TemplateControl = "%{" ("~")?; + EndStringTmpl = '"'; + StringLiteralChars = (AnyUTF8 - ("\r"|"\n")); + TemplateStringLiteral = ( + ('$' ^'{') | + ('%' ^'{') | + ('\\' StringLiteralChars) | + (StringLiteralChars - ("$" | '%' | '"')) + )+; + HeredocStringLiteral = ( + ('$' ^'{') | + ('%' ^'{') | + (StringLiteralChars - ("$" | '%')) + )*; + BareStringLiteral = ( + ('$' ^'{') | + ('%' ^'{') | + (StringLiteralChars - ("$" | '%')) + )* Newline?; + + stringTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + EndStringTmpl => endStringTemplate; + TemplateStringLiteral => { token(TokenQuotedLit); }; + AnyUTF8 => { token(TokenInvalid); }; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + heredocTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + HeredocStringLiteral EndOfLine => heredocLiteralEOL; + HeredocStringLiteral => heredocLiteralMidline; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + bareTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + BareStringLiteral => bareTemplateLiteral; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + main := |* + Spaces => {}; + NumberLit => { token(TokenNumberLit) }; + Ident => { token(TokenIdent) }; + + Comment => { token(TokenComment) }; + Newline => { token(TokenNewline) }; + + EqualOp => { token(TokenEqualOp); }; + NotEqual => { token(TokenNotEqual); }; + GreaterThanEqual => { token(TokenGreaterThanEq); }; + LessThanEqual => { token(TokenLessThanEq); }; + LogicalAnd => { token(TokenAnd); }; + LogicalOr => { token(TokenOr); }; + Ellipsis => { token(TokenEllipsis); }; + FatArrow => { token(TokenFatArrow); }; + SelfToken => { selfToken() }; + + "{" => openBrace; + "}" => closeBrace; + + "~}" => closeTemplateSeqEatWhitespace; + + BeginStringTmpl => beginStringTemplate; + BeginHeredocTmpl => beginHeredocTemplate; + + Tabs => { token(TokenTabs) }; + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = zcltok_en_main + case scanTemplate: + cs = zcltok_en_bareTemplate + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + %%{ + prepush { + stack = append(stack, 0); + } + postpop { + stack = stack[:len(stack)-1]; + } + }%% + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func (ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func () { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + %%{ + write init nocs; + write exec; + }%% + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < zcltok_first_final { + f.emitToken(TokenInvalid, p, len(data)) + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. 
+    f.emitToken(TokenEOF, len(data), len(data))
+
+    return f.Tokens
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
new file mode 100644
index 00000000000..faf279170d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
@@ -0,0 +1,879 @@
+# HCL Native Syntax Specification
+
+This is the specification of the syntax and semantics of the native syntax
+for HCL. HCL is a system for defining configuration languages for applications.
+The HCL information model is designed to support multiple concrete syntaxes
+for configuration, but this native syntax is considered the primary format
+and is optimized for human authoring and maintenance, as opposed to machine
+generation of configuration.
+
+The language consists of three integrated sub-languages:
+
+* The _structural_ language defines the overall hierarchical configuration
+  structure, and is a serialization of HCL bodies, blocks and attributes.
+
+* The _expression_ language is used to express attribute values, either as
+  literals or as derivations of other values.
+
+* The _template_ language is used to compose values together into strings,
+  as one of several types of expression in the expression language.
+
+In normal use these three sub-languages are used together within configuration
+files to describe an overall configuration, with the structural language
+being used at the top level. The expression and template languages can also
+be used in isolation, to implement features such as REPLs, debuggers, and
+integration into more limited HCL syntaxes such as the JSON profile.
+
+## Syntax Notation
+
+Within this specification a semi-formal notation is used to illustrate the
+details of syntax. This notation is intended for human consumption rather
+than machine consumption, with the following conventions:
+
+* A naked name starting with an uppercase letter is a global production,
+  common to all of the syntax specifications in this document.
+* A naked name starting with a lowercase letter is a local production,
+  meaningful only within the specification where it is defined.
+* Double and single quotes (`"` and `'`) are used to mark literal character
+  sequences, which may be either punctuation markers or keywords.
+* The default operator for combining items, which has no punctuation,
+  is concatenation.
+* The symbol `|` indicates that any one of its left and right operands may
+  be present.
+* The `*` symbol indicates zero or more repetitions of the item to its left.
+* The `?` symbol indicates zero or one of the item to its left.
+* Parentheses (`(` and `)`) are used to group items together to apply
+  the `|`, `*` and `?` operators to them collectively.
+
+The grammar notation does not fully describe the language. The prose may
+augment or conflict with the illustrated grammar. In case of conflict, prose
+has priority.
+
+## Source Code Representation
+
+Source code is Unicode text expressed in the UTF-8 encoding. The language
+itself does not perform Unicode normalization, so syntax features such as
+identifiers are sequences of Unicode code points and so e.g. a precombined
+accented character is distinct from a letter associated with a combining
+accent. (String literals have some special handling with regard to Unicode
+normalization which will be covered later in the relevant section.)
+
+UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
+non-normalized UTF-8 encoding is always a parse error.
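+
+To make these rules concrete, here is a brief, non-normative Go sketch of
+how a host application might pre-validate source bytes before parsing. The
+function name and error messages here are invented for illustration and are
+not part of any HCL API:
+
+```go
+package main
+
+import (
+    "bytes"
+    "errors"
+    "unicode/utf8"
+)
+
+// checkSource applies the two encoding rules above: no UTF-8 byte order
+// mark, and the whole input must be valid UTF-8.
+func checkSource(src []byte) error {
+    if bytes.HasPrefix(src, []byte{0xEF, 0xBB, 0xBF}) {
+        return errors.New("UTF-8 byte order mark is not permitted")
+    }
+    if !utf8.Valid(src) {
+        return errors.New("source is not valid UTF-8")
+    }
+    return nil
+}
+```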
+
+## Lexical Elements
+
+### Comments and Whitespace
+
+Comments and whitespace are recognized as lexical elements but are ignored
+except as described below.
+
+Whitespace is defined as a sequence of zero or more space characters
+(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
+are _not_ considered whitespace but are ignored as such in certain contexts.
+
+Horizontal tab characters (U+0009) are not considered to be whitespace and
+are not valid within HCL native syntax.
+
+Comments serve as program documentation and come in two forms:
+
+* _Line comments_ start with either the `//` or `#` sequences and end with
+  the next newline sequence. A line comment is considered equivalent to a
+  newline sequence.
+
+* _Inline comments_ start with the `/*` sequence and end with the `*/`
+  sequence, and may have any characters within except the ending sequence.
+  An inline comment is considered equivalent to a whitespace sequence.
+
+Comments and whitespace cannot begin within other comments, or within
+template literals except inside an interpolation sequence or template
+directive.
+
+### Identifiers
+
+Identifiers name entities such as blocks, attributes and expression variables.
+Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically,
+their syntax is defined in terms of the `ID_Start` and `ID_Continue`
+character properties as follows:
+
+```ebnf
+Identifier = ID_Start (ID_Continue | '-')*;
+```
+
+The Unicode specification provides the normative requirements for identifier
+parsing. Non-normatively, the spirit of this specification is that `ID_Start`
+consists of Unicode letters and certain unambiguous punctuation tokens, while
+`ID_Continue` augments that set with Unicode digits, combining marks, etc.
+
+The dash character `-` is additionally allowed in identifiers, even though
+that is not part of the Unicode `ID_Continue` definition. This is to allow
+attribute names and block type names to contain dashes, although underscores
+as word separators are considered the idiomatic usage.
+
+[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
+
+### Keywords
+
+There are no globally-reserved words, but in some contexts certain identifiers
+are reserved to function as keywords. These are discussed further in the
+relevant documentation sections that follow. In such situations, the
+identifier's role as a keyword supersedes any other valid interpretation that
+may be possible. Outside of these specific situations, the keywords have no
+special meaning and are interpreted as regular identifiers.
+
+### Operators and Delimiters
+
+The following character sequences represent operators, delimiters, and other
+special tokens:
+
+```
++    &&   ==   <    :    {    [    (    ${
+-    ||   !=   >    ?    }    ]    )    %{
+*    !    <=   =    .
+/         >=   =>   ,
+%         ...
+```
+
+### Numeric Literals
+
+A numeric literal is a decimal representation of a
+real number. It has an integer part, a fractional part,
+and an exponent part.
+
+```ebnf
+NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
+decimal    = '0' .. '9';
+expmark    = ('e' | 'E') ("+" | "-")?;
+```
+
+## Structural Elements
+
+The structural language consists of syntax representing the following
+constructs; a brief example follows the list:
+
+* _Attributes_, which assign a value to a specified name.
+* _Blocks_, which create a child body annotated by a type and optional labels.
+* _Body Content_, which consists of a collection of attributes and blocks.
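+
+For example (illustrative, not normative), `io_mode = "async"` is an
+attribute definition, while `service "http" "web_proxy" { ... }` opens a
+block of type `service` with two labels; the block's body may in turn
+contain further attributes and nested blocks.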
+
+These constructs correspond to the similarly-named concepts in the
+language-agnostic HCL information model.
+
+```ebnf
+ConfigFile   = Body;
+Body         = (Attribute | Block)*;
+Attribute    = Identifier "=" Expression Newline;
+Block        = Identifier (StringLit)* "{" Newline Body "}" Newline;
+```
+
+### Configuration Files
+
+A _configuration file_ is a sequence of characters whose top-level is
+interpreted as a Body.
+
+### Bodies
+
+A _body_ is a collection of associated attributes and blocks. The meaning of
+this association is defined by the calling application.
+
+### Attribute Definitions
+
+An _attribute definition_ assigns a value to a particular attribute name within
+a body. Each distinct attribute name may be defined no more than once within a
+single body.
+
+The attribute value is given as an expression, which is retained literally
+for later evaluation by the calling application.
+
+### Blocks
+
+A _block_ creates a child body that is annotated with a block _type_ and
+zero or more optional block _labels_. Blocks create a structural hierarchy
+which can be interpreted by the calling application.
+
+## Expressions
+
+The expression sub-language is used within attribute definitions to specify
+values.
+
+```ebnf
+Expression = (
+    ExprTerm |
+    Operation |
+    Conditional
+);
+```
+
+### Types
+
+The value types used within the expression language are those defined by the
+syntax-agnostic HCL information model. An expression may return any valid
+type, but only a subset of the available types have first-class syntax.
+A calling application may make other types available via _variables_ and
+_functions_.
+
+### Expression Terms
+
+Expression _terms_ are the operands for unary and binary expressions, as well
+as acting as expressions in their own right.
+
+```ebnf
+ExprTerm = (
+    LiteralValue |
+    CollectionValue |
+    TemplateExpr |
+    VariableExpr |
+    FunctionCall |
+    ForExpr |
+    ExprTerm Index |
+    ExprTerm GetAttr |
+    ExprTerm Splat |
+    "(" Expression ")"
+);
+```
+
+The productions for these different term types are given in their corresponding
+sections.
+
+Between the `(` and `)` characters denoting a sub-expression, newline
+characters are ignored as whitespace.
+
+### Literal Values
+
+A _literal value_ immediately represents a particular value of a primitive
+type.
+
+```ebnf
+LiteralValue = (
+    NumericLit |
+    "true" |
+    "false" |
+    "null"
+);
+```
+
+* Numeric literals represent values of type _number_.
+* The `true` and `false` keywords represent values of type _bool_.
+* The `null` keyword represents a null value of the dynamic pseudo-type.
+
+String literals are not directly available in the expression sub-language, but
+are available via the template sub-language, which can in turn be incorporated
+via _template expressions_.
+
+### Collection Values
+
+A _collection value_ combines zero or more other expressions to produce a
+collection value.
+
+```ebnf
+CollectionValue = tuple | object;
+tuple = "[" (
+    (Expression ("," Expression)* ","?)?
+) "]";
+object = "{" (
+    (objectelem ("," objectelem)* ","?)?
+) "}";
+objectelem = (Identifier | Expression) "=" Expression;
+```
+
+Only tuple and object values can be directly constructed via native syntax.
+Tuple and object values can in turn be converted to list, set and map values
+with other operations, which behave as defined by the syntax-agnostic HCL
+information model.
+
+When specifying an object element, an identifier is interpreted as a literal
+attribute name as opposed to a variable reference. To populate an item key
+from a variable, use parentheses to disambiguate:
+
+* `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
+* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
+  from the variable named `foo`.
+
+Between the open and closing delimiters of these sequences, newline sequences
+are ignored as whitespace.
+
+There is a syntax ambiguity between _for expressions_ and collection values
+whose first element is a reference to a variable named `for`. The
+_for expression_ interpretation has priority, so to produce a tuple whose
+first element is the value of a variable named `for`, or an object with a
+key named `for`, use parentheses to disambiguate:
+
+* `[for, foo, baz]` is a syntax error.
+* `[(for), foo, baz]` is a tuple whose first element is the value of variable
+  `for`.
+* `{for: 1, baz: 2}` is a syntax error.
+* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
+* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
+  ambiguity by reordering.
+
+### Template Expressions
+
+A _template expression_ embeds a program written in the template sub-language
+as an expression. Template expressions come in two forms:
+
+* A _quoted_ template expression is delimited by quote characters (`"`) and
+  defines a template as a single-line expression with escape characters.
+* A _heredoc_ template expression is introduced by a `<<` sequence and
+  defines a template via a multi-line sequence terminated by a user-chosen
+  delimiter.
+
+In both cases the template interpolation and directive syntax is available for
+use within the delimiters, and any text outside of these special sequences is
+interpreted as a literal string.
+
+In _quoted_ template expressions any literal string sequences within the
+template behave in a special way: literal newline sequences are not permitted
+and instead _escape sequences_ can be included, starting with the
+backslash `\`:
+
+```
+    \n         Unicode newline control character
+    \r         Unicode carriage return control character
+    \t         Unicode tab control character
+    \"         Literal quote mark, used to prevent interpretation as end of string
+    \\         Literal backslash, used to prevent interpretation as escape sequence
+    \uNNNN     Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits)
+    \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits)
+```
+
+The _heredoc_ template expression type is introduced by either `<<` or `<<-`,
+followed by an identifier. The template expression ends when the given
+identifier subsequently appears again on a line of its own.
+
+If a heredoc template is introduced with the `<<-` symbol, any literal string
+at the start of each line is analyzed to find the minimum number of leading
+spaces, and then that number of prefix spaces is removed from all line-leading
+literal strings. The final closing marker may also have an arbitrary number
+of spaces preceding it on its line.
+
+```ebnf
+TemplateExpr = quotedTemplate | heredocTemplate;
+quotedTemplate = (as defined in prose above);
+heredocTemplate = (
+    ("<<" | "<<-") Identifier Newline
+    (content as defined in prose above)
+    Identifier Newline
+);
+```
+
+A quoted template expression containing only a single literal string serves
+as a syntax for defining literal string _expressions_. In certain contexts
+the template syntax is restricted in this manner:
+
+```ebnf
+StringLit = '"' (quoted literals as defined in prose above) '"';
+```
+
+The `StringLit` production permits the escape sequences discussed for quoted
+template expressions as above, but does _not_ permit template interpolation
+or directive sequences.
+
+### Variables and Variable Expressions
+
+A _variable_ is a value that has been assigned a symbolic name. Variables are
+made available for use in expressions by the calling application, by populating
+the _global scope_ used for expression evaluation.
+
+Variables can also be created by expressions themselves, which always creates
+a _child scope_ that incorporates the variables from its parent scope but
+(re-)defines zero or more names with new values.
+
+The value of a variable is accessed using a _variable expression_, which is
+a standalone `Identifier` whose name corresponds to a defined variable:
+
+```ebnf
+VariableExpr = Identifier;
+```
+
+Variables in a particular scope are immutable, but child scopes may _hide_
+a variable from an ancestor scope by defining a new variable of the same name.
+When looking up variables, the most locally-defined variable of the given name
+is used, and ancestor-scoped variables of the same name cannot be accessed.
+
+No direct syntax is provided for declaring or assigning variables, but other
+expression constructs implicitly create child scopes and define variables as
+part of their evaluation.
+
+### Functions and Function Calls
+
+A _function_ is an operation that has been assigned a symbolic name. Functions
+are made available for use in expressions by the calling application, by
+populating the _function table_ used for expression evaluation.
+
+The namespace of functions is distinct from the namespace of variables. A
+function and a variable may share the same name with no implication that they
+are in any way related.
+
+A function can be executed via a _function call_ expression:
+
+```ebnf
+FunctionCall = Identifier "(" Arguments ")";
+Arguments = (
+    () |
+    (Expression ("," Expression)* ("," | "...")?)
+);
+```
+
+The definition of functions and the semantics of calling them are defined by
+the language-agnostic HCL information model. The given arguments are mapped
+onto the function's _parameters_ and the result of a function call expression
+is the return value of the named function when given those arguments.
+
+If the final argument expression is followed by the ellipsis symbol (`...`),
+the final argument expression must evaluate to either a list or tuple value.
+The elements of the value are each mapped to a single parameter of the
+named function, beginning at the first parameter remaining after all other
+argument expressions have been mapped.
+
+Within the parentheses that delimit the function arguments, newline sequences
+are ignored as whitespace.
+
+### For Expressions
+
+A _for expression_ is a construct for building a collection by projecting
+the items from another collection.
+
+```ebnf
+ForExpr = forTupleExpr | forObjectExpr;
+forTupleExpr = "[" forIntro Expression forCond? "]";
+forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
+forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
+forCond = "if" Expression;
+```
+
+The punctuation used to delimit a for expression decides whether it will
+produce a tuple value (`[` and `]`) or an object value (`{` and `}`).
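+For example, `[for v in src: v]` produces a tuple while
+`{for k, v in src: k => v}` produces an object, assuming a suitable iterable
+value named `src`; the semantics of both forms are detailed below.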
+
+The "introduction" is equivalent in both cases: the keyword `for` followed by
+either one or two identifiers separated by a comma which define the temporary
+variable names used for iteration, followed by the keyword `in` and then
+an expression that must evaluate to a value that can be iterated. The
+introduction is then terminated by the colon (`:`) symbol.
+
+If only one identifier is provided, it is the name of a variable that will
+be temporarily assigned the value of each element during iteration. If both
+are provided, the first is the key and the second is the value.
+
+Tuple, object, list, map, and set types are iterable. The type of collection
+used defines how the key and value variables are populated:
+
+* For tuple and list types, the _key_ is the zero-based index into the
+  sequence for each element, and the _value_ is the element value. The
+  elements are visited in index order.
+* For object and map types, the _key_ is the string attribute name or element
+  key, and the _value_ is the attribute or element value. The elements are
+  visited in the order defined by a lexicographic sort of the attribute names
+  or keys.
+* For set types, the _key_ and _value_ are both the element value. The elements
+  are visited in an undefined but consistent order.
+
+The expression after the colon and (in the case of object `for`) the expression
+after the `=>` are both evaluated once for each element of the source
+collection, in a local scope that defines the key and value variable names
+specified.
+
+The results of evaluating these expressions for each input element are used
+to populate an element in the new collection. In the case of tuple `for`, the
+single expression becomes an element, appending values to the tuple in visit
+order. In the case of object `for`, the pair of expressions is used as an
+attribute name and value respectively, creating an element in the resulting
+object.
+
+In the case of object `for`, it is an error if two input elements produce
+the same result from the attribute name expression, since duplicate
+attributes are not possible. If the ellipsis symbol (`...`) appears
+immediately after the value expression, this activates the grouping mode in
+which each value in the resulting object is a _tuple_ of all of the values
+that were produced against each distinct key.
+
+* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
+* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
+* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
+* `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
+  `a` is defined twice.
+* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
+
+If the `if` keyword is used after the element expression(s), it applies an
+additional predicate that can be used to conditionally filter elements from
+the source collection from consideration. The expression following `if` is
+evaluated once for each source element, in the same scope used for the
+element expression(s). It must evaluate to a boolean value; if `true`, the
+element will be evaluated as normal, while if `false` the element will be
+skipped.
+
+* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
+
+If the collection value, element expression(s) or condition expression return
+unknown values that are otherwise type-valid, the result is a value of the
+dynamic pseudo-type.
+
+### Index Operator
+
+The _index_ operator returns the value of a single element of a collection
+value. It is a postfix operator and can be applied to any value that has
It is a postfix operator and can be applied to any value that has
+a tuple, object, map, or list type.
+
+```ebnf
+Index = "[" Expression "]";
+```
+
+The expression delimited by the brackets is the _key_ by which an element
+will be looked up.
+
+If the index operator is applied to a value of tuple or list type, the
+key expression must be a non-negative integer number representing the
+zero-based element index to access. If applied to a value of object or map
+type, the key expression must be a string representing the attribute name
+or element key. If the given key value is not of the appropriate type, a
+conversion is attempted using the conversion rules from the HCL
+syntax-agnostic information model.
+
+An error is produced if the given key expression does not correspond to
+an element in the collection, either because it is of an unconvertible type,
+because it is outside the range of elements for a tuple or list, or because
+the given attribute or key does not exist.
+
+If either the collection or the key is an unknown value of an
+otherwise-suitable type, the return value is an unknown value whose type
+matches what type would be returned given known values, or a value of the
+dynamic pseudo-type if type information alone cannot determine a suitable
+return type.
+
+Within the brackets that delimit the index key, newline sequences are ignored
+as whitespace.
+
+### Attribute Access Operator
+
+The _attribute access_ operator returns the value of a single attribute in
+an object value. It is a postfix operator and can be applied to any value
+that has an object type.
+
+```ebnf
+GetAttr = "." Identifier;
+```
+
+The given identifier is interpreted as the name of the attribute to access.
+An error is produced if the object to which the operator is applied does not
+have an attribute with the given name.
+
+If the object is an unknown value of a type that has the attribute named, the
+result is an unknown value of the attribute's type.
+
+### Splat Operators
+
+The _splat operators_ allow convenient access to attributes or elements of
+elements in a tuple, list, or set value.
+
+There are two kinds of "splat" operator:
+
+* The _attribute-only_ splat operator supports only attribute lookups into
+  the elements from a list, but supports an arbitrary number of them.
+
+* The _full_ splat operator additionally supports indexing into the elements
+  from a list, and allows any combination of attribute access and index
+  operations.
+
+```ebnf
+Splat = attrSplat | fullSplat;
+attrSplat = "." "*" GetAttr*;
+fullSplat = "[" "*" "]" (GetAttr | Index)*;
+```
+
+The splat operators can be thought of as shorthands for common operations that
+could otherwise be performed using _for expressions_:
+
+* `tuple.*.foo.bar[0]` is approximately equivalent to
+  `[for v in tuple: v.foo.bar][0]`.
+* `tuple[*].foo.bar[0]` is approximately equivalent to
+  `[for v in tuple: v.foo.bar[0]]`.
+
+Note the difference in how the trailing index operator is interpreted in
+each case. This difference in interpretation is the key distinction between
+the _attribute-only_ and _full_ splat operators.
+
+Splat operators have one additional behavior compared to the equivalent
+_for expressions_ shown above: if a splat operator is applied to a value that
+is _not_ of tuple, list, or set type, the value is coerced automatically into
+a single-value list of the value type:
+
+* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
+  is a single object.
+
+* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
+  is a single number.
+
+If the left operand of a splat operator is an unknown value of any type, the
+result is a value of the dynamic pseudo-type.
+
+### Operations
+
+Operations apply a particular operator to either one or two expression terms.
+
+```ebnf
+Operation = unaryOp | binaryOp;
+unaryOp = ("-" | "!") ExprTerm;
+binaryOp = ExprTerm binaryOperator ExprTerm;
+binaryOperator = compareOperator | arithmeticOperator | logicOperator;
+compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
+arithmeticOperator = "+" | "-" | "*" | "/" | "%";
+logicOperator = "&&" | "||" | "!";
+```
+
+The unary operators have the highest precedence.
+
+The binary operators are grouped into the following precedence levels:
+
+```
+Level    Operators
+  6      * / %
+  5      + -
+  4      > >= < <=
+  3      == !=
+  2      &&
+  1      ||
+```
+
+Higher values of "level" bind tighter. Operators within the same precedence
+level have left-to-right associativity. For example, `x / y * z` is equivalent
+to `(x / y) * z`.
+
+### Comparison Operators
+
+Comparison operators always produce boolean values, as a result of testing
+the relationship between two values.
+
+The two equality operators apply to values of any type:
+
+```
+a == b   equal
+a != b   not equal
+```
+
+Two values are equal if they are of identical types and their values are
+equal as defined in the HCL syntax-agnostic information model. The equality
+operators are commutative and opposite, such that `(a == b) == !(a != b)`
+and `(a == b) == (b == a)` for all values `a` and `b`.
+
+The four numeric comparison operators apply only to numbers:
+
+```
+a < b    less than
+a <= b   less than or equal to
+a > b    greater than
+a >= b   greater than or equal to
+```
+
+If either operand of a comparison operator is a correctly-typed unknown value
+or a value of the dynamic pseudo-type, the result is an unknown boolean.
+
+### Arithmetic Operators
+
+Arithmetic operators apply only to number values and always produce number
+values as results.
+
+```
+a + b    sum        (addition)
+a - b    difference (subtraction)
+a * b    product    (multiplication)
+a / b    quotient   (division)
+a % b    remainder  (modulo)
+-a       negation
+```
+
+Arithmetic operations are considered to be performed in an arbitrary-precision
+number space.
+
+If either operand of an arithmetic operator is an unknown number or a value
+of the dynamic pseudo-type, the result is an unknown number.
+
+### Logic Operators
+
+Logic operators apply only to boolean values and always produce boolean values
+as results.
+
+```
+a && b   logical AND
+a || b   logical OR
+!a       logical NOT
+```
+
+If either operand of a logic operator is an unknown bool value or a value
+of the dynamic pseudo-type, the result is an unknown bool value.
+
+### Conditional Operator
+
+The conditional operator allows selecting from one of two expressions based on
+the outcome of a boolean expression.
+
+```ebnf
+Conditional = Expression "?" Expression ":" Expression;
+```
+
+The first expression is the _predicate_, which is evaluated and must produce
+a boolean result. If the predicate value is `true`, the result of the second
+expression is the result of the conditional. If the predicate value is
+`false`, the result of the third expression is the result of the conditional.
+
+The second and third expressions must be of the same type or must be able to
+unify into a common type using the type unification rules defined in the
+HCL syntax-agnostic information model.
This unified type is the result type
+of the conditional, with both expressions converted as necessary to the
+unified type.
+
+If the predicate is an unknown boolean value or a value of the dynamic
+pseudo-type then the result is an unknown value of the unified type of the
+other two expressions.
+
+If either the second or third expressions produce errors when evaluated,
+these errors are passed through only if the erroneous expression is selected.
+This allows for expressions such as
+`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length`
+function) without producing an error when the predicate is `false`.
+
+## Templates
+
+The template sub-language is used within template expressions to concisely
+combine strings and other values to produce other strings. It can also be
+used in isolation as a standalone template language.
+
+```ebnf
+Template = (
+    TemplateLiteral |
+    TemplateInterpolation |
+    TemplateDirective
+)*;
+TemplateDirective = TemplateIf | TemplateFor;
+```
+
+A template behaves like an expression that always returns a string value.
+The different elements of the template are evaluated and combined into a
+single string to return. If any of the elements produce an unknown string
+or a value of the dynamic pseudo-type, the result is an unknown string.
+
+An important use-case for standalone templates is to enable the use of
+expressions in alternative HCL syntaxes where a native expression grammar is
+not available. For example, the HCL JSON profile treats the values of JSON
+strings as standalone templates when attributes are evaluated in expression
+mode.
+
+### Template Literals
+
+A template literal is a literal sequence of characters to include in the
+resulting string. When the template sub-language is used standalone, a
+template literal can contain any unicode character, with the exception
+of the sequences that introduce interpolations and directives, and of the
+sequences that escape those introductions.
+
+The interpolation and directive introductions are escaped by doubling their
+leading characters. The `${` sequence is escaped as `$${` and the `%{`
+sequence is escaped as `%%{`.
+
+When the template sub-language is embedded in the expression language via
+_template expressions_, additional constraints and transforms are applied to
+template literals as described in the definition of template expressions.
+
+The value of a template literal can be modified by _strip markers_ in any
+interpolations or directives that are adjacent to it. A strip marker is
+a tilde (`~`) placed immediately after the opening `{` or before the closing
+`}` of a template sequence:
+
+* `hello ${~ "world" }` produces `"helloworld"`.
+* `%{ if true ~} hello %{~ endif }` produces `"hello"`.
+
+When a strip marker is present, any spaces adjacent to it in the corresponding
+string literal (if any) are removed before producing the final value. Space
+characters are interpreted as per Unicode's definition.
+
+Stripping is done at syntax level rather than value level. Values returned
+by interpolations or directives are not subject to stripping:
+
+* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
+  because the space is not in a template literal directly adjacent to the
+  strip marker.
+
+### Template Interpolations
+
+An _interpolation sequence_ evaluates an expression (written in the
+expression sub-language), converts the result to a string value, and
+replaces itself with the resulting string.
+
+```ebnf
+TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
+```
+
+If the expression result cannot be converted to a string, an error is
+produced.
+
+### Template If Directive
+
+The template `if` directive is the template equivalent of the
+_conditional expression_, allowing selection of one of two sub-templates based
+on the value of a predicate expression.
+
+```ebnf
+TemplateIf = (
+    ("%{" | "%{~") "if" Expression ("}" | "~}")
+    Template
+    (
+        ("%{" | "%{~") "else" ("}" | "~}")
+        Template
+    )?
+    ("%{" | "%{~") "endif" ("}" | "~}")
+);
+```
+
+The evaluation of the `if` directive is equivalent to the conditional
+expression, with the following exceptions:
+
+* The two sub-templates always produce strings, and thus the result value is
+  also always a string.
+* The `else` clause may be omitted, in which case the conditional's third
+  expression result is implied to be the empty string.
+
+### Template For Directive
+
+The template `for` directive is the template equivalent of the _for expression_,
+producing zero or more copies of its sub-template based on the elements of
+a collection.
+
+```ebnf
+TemplateFor = (
+    ("%{" | "%{~") "for" Identifier ("," Identifier)? "in" Expression ("}" | "~}")
+    Template
+    ("%{" | "%{~") "endfor" ("}" | "~}")
+);
+```
+
+The evaluation of the `for` directive is equivalent to the _for expression_
+when producing a tuple, with the following exceptions:
+
+* The sub-template always produces a string.
+* There is no equivalent of the "if" clause on the for expression.
+* The elements of the resulting tuple are all converted to strings and
+  concatenated to produce a flat string result.
+
+### Template Interpolation Unwrapping
+
+As a special case, a template that consists only of a single interpolation,
+with no surrounding literals, directives or other interpolations, is
+"unwrapped". In this case, the result of the interpolation expression is
+returned verbatim, without conversion to string.
+
+This special case exists primarily to enable the native template language
+to be used inside strings in alternative HCL syntaxes that lack a first-class
+template or expression syntax. Unwrapping allows arbitrary expressions to be
+used to populate attributes when strings in such languages are interpreted
+as templates.
+
+* `${true}` produces the boolean value `true`.
+* `${"${true}"}` produces the boolean value `true`, because both the inner
+  and outer interpolations are subject to unwrapping.
+* `hello ${true}` produces the string `"hello true"`.
+* `${""}${true}` produces the string `"true"` because there are two
+  interpolation sequences, even though one produces an empty result.
+* `%{ for v in [true] }${v}%{ endfor }` produces the string `"true"` because
+  the presence of the `for` directive circumvents the unwrapping even though
+  the final result is a single value.
+
+In some contexts this unwrapping behavior may be circumvented by the calling
+application, by converting the final template result to string. This is
+necessary, for example, if a standalone template is being used to produce
+the direct contents of a file, since the result in that case must always be a
+string.
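+
+As a non-normative illustration of unwrapping, the following Go sketch
+evaluates two standalone templates using the parser vendored in this tree.
+It assumes the `hclsyntax.ParseTemplate` entry point (also used by the JSON
+syntax implementation in this changeset) behaves as described above:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func evalTemplate(src string, ctx *hcl.EvalContext) cty.Value {
+	expr, diags := hclsyntax.ParseTemplate([]byte(src), "tmpl.hcl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+	val, _ := expr.Value(ctx)
+	return val
+}
+
+func main() {
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{"count": cty.NumberIntVal(5)},
+	}
+
+	// A lone interpolation is unwrapped, so the number type is preserved.
+	fmt.Println(evalTemplate("${count}", ctx).Type().FriendlyName())
+
+	// Any adjacent literal forces conversion of the result to string.
+	fmt.Println(evalTemplate("n=${count}", ctx).Type().FriendlyName())
+}
+```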
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
new file mode 100644
index 00000000000..eb686d5de78
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -0,0 +1,379 @@
+package hclsyntax
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// AsHCLBlock returns the block data expressed as a *hcl.Block.
+func (b *Block) AsHCLBlock() *hcl.Block {
+	lastHeaderRange := b.TypeRange
+	if len(b.LabelRanges) > 0 {
+		lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
+	}
+
+	return &hcl.Block{
+		Type:   b.Type,
+		Labels: b.Labels,
+		Body:   b.Body,
+
+		DefRange:    hcl.RangeBetween(b.TypeRange, lastHeaderRange),
+		TypeRange:   b.TypeRange,
+		LabelRanges: b.LabelRanges,
+	}
+}
+
+// Body is the implementation of hcl.Body for the zcl native syntax.
+type Body struct {
+	Attributes Attributes
+	Blocks     Blocks
+
+	// These are used with PartialContent to produce a "remaining items"
+	// body to return. They are nil on all bodies fresh out of the parser.
+	hiddenAttrs  map[string]struct{}
+	hiddenBlocks map[string]struct{}
+
+	SrcRange hcl.Range
+	EndRange hcl.Range // Final token of the body, for reporting missing items
+}
+
+// Assert that *Body implements hcl.Body
+var assertBodyImplBody hcl.Body = &Body{}
+
+func (b *Body) walkChildNodes(w internalWalkFunc) {
+	b.Attributes = w(b.Attributes).(Attributes)
+	b.Blocks = w(b.Blocks).(Blocks)
+}
+
+func (b *Body) Range() hcl.Range {
+	return b.SrcRange
+}
+
+func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	content, remainHCL, diags := b.PartialContent(schema)
+
+	// Now we'll see if anything actually remains, to produce errors about
+	// extraneous items.
+	remain := remainHCL.(*Body)
+
+	for name, attr := range b.Attributes {
+		if _, hidden := remain.hiddenAttrs[name]; !hidden {
+			var suggestions []string
+			for _, attrS := range schema.Attributes {
+				if _, defined := content.Attributes[attrS.Name]; defined {
+					continue
+				}
+				suggestions = append(suggestions, attrS.Name)
+			}
+			suggestion := nameSuggestion(name, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there a block of the same name?
+				for _, blockS := range schema.Blocks {
+					if blockS.Type == name {
+						suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
+						break
+					}
+				}
+			}
+
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unsupported attribute",
+				Detail:   fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion),
+				Subject:  &attr.NameRange,
+			})
+		}
+	}
+
+	for _, block := range b.Blocks {
+		blockTy := block.Type
+		if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
+			var suggestions []string
+			for _, blockS := range schema.Blocks {
+				suggestions = append(suggestions, blockS.Type)
+			}
+			suggestion := nameSuggestion(blockTy, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there an attribute of the same name?
+ for _, attrS := range schema.Attributes { + if attrS.Name == blockTy { + suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion), + Subject: &block.TypeRange, + }) + } + } + + return content, diags +} + +func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var blocks hcl.Blocks + var diags hcl.Diagnostics + hiddenAttrs := make(map[string]struct{}) + hiddenBlocks := make(map[string]struct{}) + + if b.hiddenAttrs != nil { + for k, v := range b.hiddenAttrs { + hiddenAttrs[k] = v + } + } + if b.hiddenBlocks != nil { + for k, v := range b.hiddenBlocks { + hiddenBlocks[k] = v + } + } + + for _, attrS := range schema.Attributes { + name := attrS.Name + attr, exists := b.Attributes[name] + _, hidden := hiddenAttrs[name] + if hidden || !exists { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + continue + } + + hiddenAttrs[name] = struct{}{} + attrs[name] = attr.AsHCLAttribute() + } + + blocksWanted := make(map[string]hcl.BlockHeaderSchema) + for _, blockS := range schema.Blocks { + blocksWanted[blockS.Type] = blockS + } + + for _, block := range b.Blocks { + if _, hidden := hiddenBlocks[block.Type]; hidden { + continue + } + blockS, wanted := blocksWanted[block.Type] + if !wanted { + continue + } + + if len(block.Labels) > len(blockS.LabelNames) { + name := block.Type + if len(blockS.LabelNames) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "No labels are expected for %s blocks.", name, + ), + Subject: block.LabelRanges[0].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "Only %d labels (%s) are expected for %s blocks.", + len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name, + ), + Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } + continue + } + + if len(block.Labels) < len(blockS.LabelNames) { + name := block.Type + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name), + Detail: fmt.Sprintf( + "All %s blocks must have %d labels (%s).", + name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), + ), + Subject: &block.OpenBraceRange, + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + continue + } + + blocks = append(blocks, block.AsHCLBlock()) + } + + // We hide blocks only after we've processed all of them, since otherwise + // we can't process more than one of the same type. 
+ for _, blockS := range schema.Blocks { + hiddenBlocks[blockS.Type] = struct{}{} + } + + remain := &Body{ + Attributes: b.Attributes, + Blocks: b.Blocks, + + hiddenAttrs: hiddenAttrs, + hiddenBlocks: hiddenBlocks, + + SrcRange: b.SrcRange, + EndRange: b.EndRange, + } + + return &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + + MissingItemRange: b.MissingItemRange(), + }, remain, diags +} + +func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var diags hcl.Diagnostics + + if len(b.Blocks) > 0 { + example := b.Blocks[0] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s block", example.Type), + Detail: "Blocks are not allowed here.", + Context: &example.TypeRange, + }) + // we will continue processing anyway, and return the attributes + // we are able to find so that certain analyses can still be done + // in the face of errors. + } + + if b.Attributes == nil { + return attrs, diags + } + + for name, attr := range b.Attributes { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = attr.AsHCLAttribute() + } + + return attrs, diags +} + +func (b *Body) MissingItemRange() hcl.Range { + return b.EndRange +} + +// Attributes is the collection of attribute definitions within a body. +type Attributes map[string]*Attribute + +func (a Attributes) walkChildNodes(w internalWalkFunc) { + for k, attr := range a { + a[k] = w(attr).(*Attribute) + } +} + +// Range returns the range of some arbitrary point within the set of +// attributes, or an invalid range if there are no attributes. +// +// This is provided only to complete the Node interface, but has no practical +// use. +func (a Attributes) Range() hcl.Range { + // An attributes doesn't really have a useful range to report, since + // it's just a grouping construct. So we'll arbitrarily take the + // range of one of the attributes, or produce an invalid range if we have + // none. In practice, there's little reason to ask for the range of + // an Attributes. + for _, attr := range a { + return attr.Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Attribute represents a single attribute definition within a body. +type Attribute struct { + Name string + Expr Expression + + SrcRange hcl.Range + NameRange hcl.Range + EqualsRange hcl.Range +} + +func (a *Attribute) walkChildNodes(w internalWalkFunc) { + a.Expr = w(a.Expr).(Expression) +} + +func (a *Attribute) Range() hcl.Range { + return a.SrcRange +} + +// AsHCLAttribute returns the block data expressed as a *hcl.Attribute. +func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + return &hcl.Attribute{ + Name: a.Name, + Expr: a.Expr, + + Range: a.SrcRange, + NameRange: a.NameRange, + } +} + +// Blocks is the list of nested blocks within a body. +type Blocks []*Block + +func (bs Blocks) walkChildNodes(w internalWalkFunc) { + for i, block := range bs { + bs[i] = w(block).(*Block) + } +} + +// Range returns the range of some arbitrary point within the list of +// blocks, or an invalid range if there are no blocks. +// +// This is provided only to complete the Node interface, but has no practical +// use. 
+func (bs Blocks) Range() hcl.Range { + if len(bs) > 0 { + return bs[0].Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Block represents a nested block structure +type Block struct { + Type string + Labels []string + Body *Body + + TypeRange hcl.Range + LabelRanges []hcl.Range + OpenBraceRange hcl.Range + CloseBraceRange hcl.Range +} + +func (b *Block) walkChildNodes(w internalWalkFunc) { + b.Body = w(b.Body).(*Body) +} + +func (b *Block) Range() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go new file mode 100644 index 00000000000..00d6d720a2b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go @@ -0,0 +1,271 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" +) + +// Token represents a sequence of bytes from some HCL code that has been +// tagged with a type and its range within the source file. +type Token struct { + Type TokenType + Bytes []byte + Range hcl.Range +} + +// Tokens is a slice of Token. +type Tokens []Token + +// TokenType is an enumeration used for the Type field on Token. +type TokenType rune + +const ( + // Single-character tokens are represented by their own character, for + // convenience in producing these within the scanner. However, the values + // are otherwise arbitrary and just intended to be mnemonic for humans + // who might see them in debug output. + + TokenOBrace TokenType = '{' + TokenCBrace TokenType = '}' + TokenOBrack TokenType = '[' + TokenCBrack TokenType = ']' + TokenOParen TokenType = '(' + TokenCParen TokenType = ')' + TokenOQuote TokenType = '«' + TokenCQuote TokenType = '»' + TokenOHeredoc TokenType = 'H' + TokenCHeredoc TokenType = 'h' + + TokenStar TokenType = '*' + TokenSlash TokenType = '/' + TokenPlus TokenType = '+' + TokenMinus TokenType = '-' + TokenPercent TokenType = '%' + + TokenEqual TokenType = '=' + TokenEqualOp TokenType = '≔' + TokenNotEqual TokenType = '≠' + TokenLessThan TokenType = '<' + TokenLessThanEq TokenType = '≤' + TokenGreaterThan TokenType = '>' + TokenGreaterThanEq TokenType = '≥' + + TokenAnd TokenType = '∧' + TokenOr TokenType = '∨' + TokenBang TokenType = '!' + + TokenDot TokenType = '.' + TokenComma TokenType = ',' + + TokenEllipsis TokenType = '…' + TokenFatArrow TokenType = '⇒' + + TokenQuestion TokenType = '?' + TokenColon TokenType = ':' + + TokenTemplateInterp TokenType = '∫' + TokenTemplateControl TokenType = 'λ' + TokenTemplateSeqEnd TokenType = '∎' + + TokenQuotedLit TokenType = 'Q' // might contain backslash escapes + TokenStringLit TokenType = 'S' // cannot contain backslash escapes + TokenNumberLit TokenType = 'N' + TokenIdent TokenType = 'I' + + TokenComment TokenType = 'C' + + TokenNewline TokenType = '\n' + TokenEOF TokenType = '␄' + + // The rest are not used in the language but recognized by the scanner so + // we can generate good diagnostics in the parser when users try to write + // things that might work in other languages they are familiar with, or + // simply make incorrect assumptions about the HCL language. 
+
+	TokenBitwiseAnd TokenType = '&'
+	TokenBitwiseOr  TokenType = '|'
+	TokenBitwiseNot TokenType = '~'
+	TokenBitwiseXor TokenType = '^'
+	TokenStarStar   TokenType = '➚'
+	TokenBacktick   TokenType = '`'
+	TokenSemicolon  TokenType = ';'
+	TokenTabs       TokenType = '␉'
+	TokenInvalid    TokenType = '�'
+	TokenBadUTF8    TokenType = '💩'
+
+	// TokenNil is a placeholder for when a token is required but none is
+	// available, e.g. when reporting errors. The scanner will never produce
+	// this as part of a token stream.
+	TokenNil TokenType = '\x00'
+)
+
+func (t TokenType) GoString() string {
+	return fmt.Sprintf("hclsyntax.%s", t.String())
+}
+
+type scanMode int
+
+const (
+	scanNormal scanMode = iota
+	scanTemplate
+)
+
+type tokenAccum struct {
+	Filename string
+	Bytes    []byte
+	Pos      hcl.Pos
+	Tokens   []Token
+}
+
+func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
+	// Walk through our buffer to figure out how much we need to adjust
+	// the start pos to get our end pos.
+
+	start := f.Pos
+	start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
+	start.Byte = startOfs
+
+	end := start
+	end.Byte = endOfs
+	b := f.Bytes[startOfs:endOfs]
+	for len(b) > 0 {
+		advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
+		if len(seq) == 1 && seq[0] == '\n' {
+			end.Line++
+			end.Column = 1
+		} else {
+			end.Column++
+		}
+		b = b[advance:]
+	}
+
+	f.Pos = end
+
+	f.Tokens = append(f.Tokens, Token{
+		Type:  ty,
+		Bytes: f.Bytes[startOfs:endOfs],
+		Range: hcl.Range{
+			Filename: f.Filename,
+			Start:    start,
+			End:      end,
+		},
+	})
+}
+
+type heredocInProgress struct {
+	Marker      []byte
+	StartOfLine bool
+}
+
+// checkInvalidTokens does a simple pass across the given tokens and generates
+// diagnostics for tokens that should _never_ appear in HCL source. This
+// is intended to avoid the need for the parser to have special support
+// for them all over.
+//
+// Returns a diagnostics with no errors if everything seems acceptable.
+// Otherwise, returns zero or more error diagnostics, though tries to limit
+// repetition of the same information.
+func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	toldBitwise := 0
+	toldExponent := 0
+	toldBacktick := 0
+	toldSemicolon := 0
+	toldTabs := 0
+	toldBadUTF8 := 0
+
+	for _, tok := range tokens {
+		switch tok.Type {
+		case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
+			if toldBitwise < 4 {
+				var suggestion string
+				switch tok.Type {
+				case TokenBitwiseAnd:
+					suggestion = " Did you mean boolean AND (\"&&\")?"
+				case TokenBitwiseOr:
+					suggestion = " Did you mean boolean OR (\"||\")?"
+				case TokenBitwiseNot:
+					suggestion = " Did you mean boolean NOT (\"!\")?"
+				}
+
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
+					Subject:  &tok.Range,
+				})
+				toldBitwise++
+			}
+		case TokenStarStar:
+			if toldExponent < 1 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
+					Subject:  &tok.Range,
+				})
+
+				toldExponent++
+			}
+		case TokenBacktick:
+			// Only report for alternating (even) backticks, so we won't report both start and ends of the same
+			// backtick-quoted string.
+ if toldExponent < 4 && (toldExponent%2) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"< +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. +# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. 
+ +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. + +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. 
+ +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state maching to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts <= 'a' || b <= 'z' || b >= 'A' || b <= 'Z': + return true + default: + return false + } +} + +func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + b := buf[i] + switch { + case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func scanString(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't validate correct use of escapes, etc. It pays + // attention to escapes only for the purpose of identifying the closing + // quote character. It's the parser's responsibility to do proper + // validation. + // + // The scanner also doesn't specifically detect unterminated string + // literals, though they can be identified in the parser by checking if + // the final byte in a string token is the double-quote character. + + // Skip the opening quote symbol + i := 1 + p := start + p.Pos.Byte++ + p.Pos.Column++ + escaping := false +Byte: + for i < len(buf) { + b := buf[i] + + switch { + case b == '\\': + escaping = !escaping + p.Pos.Byte++ + p.Pos.Column++ + i++ + case b == '"': + p.Pos.Byte++ + p.Pos.Column++ + i++ + if !escaping { + break Byte + } + escaping = false + case b < 32: + break Byte + default: + // Advance by one grapheme cluster, so that we consider each + // grapheme to be a "column". + // Ignoring error because this scanner cannot produce errors. + advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) + + p.Pos.Byte += advance + p.Pos.Column++ + i += advance + + escaping = false + } + } + return buf[:i], buf[i:], p +} + +func skipWhitespace(buf []byte, start pos) ([]byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case ' ': + p.Pos.Byte++ + p.Pos.Column++ + case '\n': + p.Pos.Byte++ + p.Pos.Column = 1 + p.Pos.Line++ + case '\r': + // For the purpose of line/column counting we consider a + // carriage return to take up no space, assuming that it will + // be paired up with a newline (on Windows, for example) that + // will account for both of them. 
+			p.Pos.Byte++
+		case '\t':
+			// We arbitrarily count a tab as if it were two spaces, because
+			// we need to choose _some_ number here. This means any system
+			// that renders code on-screen with markers must itself treat
+			// tabs as a pair of spaces for rendering purposes, or instead
+			// use the byte offset and back into its own column position.
+			p.Pos.Byte++
+			p.Pos.Column += 2
+		default:
+			break Byte
+		}
+	}
+	return buf[i:], p
+}
+
+type pos struct {
+	Filename string
+	Pos      hcl.Pos
+}
+
+func (p *pos) Range(byteLen, charLen int) hcl.Range {
+	start := p.Pos
+	end := p.Pos
+	end.Byte += byteLen
+	end.Column += charLen
+	return hcl.Range{
+		Filename: p.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+func posRange(start, end pos) hcl.Range {
+	return hcl.Range{
+		Filename: start.Filename,
+		Start:    start.Pos,
+		End:      end.Pos,
+	}
+}
+
+func (t token) GoString() string {
+	return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
new file mode 100644
index 00000000000..d6e8bf696f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
@@ -0,0 +1,265 @@
+# HCL JSON Syntax Specification
+
+This is the specification for the JSON serialization for hcl. HCL is a system
+for defining configuration languages for applications. The HCL information
+model is designed to support multiple concrete syntaxes for configuration,
+and this JSON-based format complements [the native syntax](../zclsyntax/spec.md)
+by being easy to machine-generate, whereas the native syntax is oriented
+towards human authoring and maintenance.
+
+This syntax is defined in terms of JSON as defined in
+[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
+grammar as-is, and merely defines a specific methodology for interpreting
+JSON constructs into HCL structural elements and expressions.
+
+This mapping is defined such that valid JSON-serialized HCL input can be
+produced using standard JSON implementations in various programming languages.
+_Parsing_ such JSON has some additional constraints beyond what is normally
+supported by JSON parsers, though adaptations are defined to allow processing
+with an off-the-shelf JSON parser with certain caveats, described in later
+sections.
+
+## Structural Elements
+
+The HCL language-agnostic information model defines a _body_ as an abstract
+container for attribute definitions and child blocks. A body is represented
+in JSON as a JSON _object_.
+
+As defined in the language-agnostic model, body processing is done in terms
+of a schema which provides context for interpreting the body's content. For
+JSON bodies, the schema is crucial to allow differentiation of attribute
+definitions and block definitions, both of which are represented via object
+properties.
+
+The special property name `"//"`, when used in an object representing a HCL
+body, is parsed and ignored. A property with this name can be used to
+include human-readable comments. (This special property name is _not_
+processed in this way for any _other_ HCL constructs that are represented as
+JSON objects.)
+
+### Attributes
+
+Where the given schema describes an attribute with a given name, the object
+property with the matching name — if present — serves as the attribute's
+definition.
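+
+As a non-normative illustration, the following Go sketch reads such an
+attribute definition using the implementation in this package. It assumes
+the `json.Parse` entry point and the `hcl.BodySchema` types behave as
+described in the language-agnostic model:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+func main() {
+	src := []byte(`{"foo": "bar baz"}`)
+
+	file, diags := json.Parse(src, "example.hcl.json")
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	schema := &hcl.BodySchema{
+		Attributes: []hcl.AttributeSchema{{Name: "foo", Required: true}},
+	}
+	content, diags := file.Body.Content(schema)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// A nil EvalContext selects literal-only mode, so the string is
+	// returned verbatim rather than being evaluated as a template.
+	val, _ := content.Attributes["foo"].Expr.Value(nil)
+	fmt.Println(val.AsString())
+}
+```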
+
+When a body is being processed in the _dynamic attributes_ mode, each object
+property serves as an attribute definition for the attribute whose name
+matches the property name.
+
+The value of an attribute definition property is interpreted as an _expression_,
+as described in a later section.
+
+Given a schema that calls for an attribute named "foo", a JSON object like
+the following provides a definition for that attribute:
+
+```json
+{
+  "foo": "bar baz"
+}
+```
+
+### Blocks
+
+Where the given schema describes a block with a given type name, the object
+property with the matching name — if present — serves as a definition of
+zero or more blocks of that type.
+
+Processing of child blocks is in terms of nested JSON objects and arrays.
+If the schema defines one or more _labels_ for the block type, a nested
+object is required for each labelling level, with the object keys serving as
+the label values at that level.
+
+After any labelling levels, the next nested value is either a JSON object
+representing a single block body, or a JSON array of JSON objects that each
+represent a single block body. Use of an array accommodates the definition
+of multiple blocks that have identical type and labels.
+
+Given a schema that calls for a block type named "foo" with no labels, the
+following JSON objects are all valid definitions of zero or more blocks of this
+type:
+
+```json
+{
+  "foo": {
+    "child_attr": "baz"
+  }
+}
+```
+
+```json
+{
+  "foo": [
+    {
+      "child_attr": "baz"
+    },
+    {
+      "child_attr": "boz"
+    }
+  ]
+}
+```
+
+```json
+{
+  "foo": []
+}
+```
+
+The first of these defines a single child block of type "foo". The second
+defines _two_ such blocks. The final example shows a degenerate definition
+of zero blocks, though generators should prefer to omit the property entirely
+in this scenario.
+
+Given a schema that calls for a block type named "foo" with _two_ labels, the
+extra label levels must be represented as objects as in the following examples:
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "boz": {
+      "baz": {
+        "child_attr": "baz"
+      }
+    }
+  }
+}
+```
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "boz": {
+      "baz": [
+        {
+          "child_attr": "baz"
+        },
+        {
+          "child_attr": "boz"
+        }
+      ]
+    }
+  }
+}
+```
+
+Where multiple definitions are included for the same type and labels, the
+JSON array is always the value of the property representing the final label,
+and contains objects representing block bodies. It is not valid to use an array
+at any other point in the block definition structure.
+
+## Expressions
+
+JSON lacks a native expression syntax, so the HCL JSON syntax instead defines
+a mapping for each of the JSON value types, including a special mapping for
+strings that allows optional use of arbitrary expressions.
+
+### Objects
+
+When interpreted as an expression, a JSON object represents a value of a HCL
+object type.
+
+Each property of the JSON object represents an attribute of the HCL object type.
+The object type is constructed by enumerating the JSON object properties,
+creating for each an attribute whose name exactly matches the property name,
+and whose type is the result of recursively applying the expression mapping
+rules.
+
+An instance of the constructed object type is then created, whose values
+are interpreted by again recursively applying the mapping rules defined in
+this section.
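+
+As a non-normative illustration, the following Go sketch shows a JSON object
+property being evaluated into an HCL object value, again assuming the
+`json.Parse` entry point of this package; the property names are arbitrary:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+func main() {
+	src := []byte(`{"net": {"cidr": "10.0.0.0/16", "private": true}}`)
+
+	file, diags := json.Parse(src, "example.hcl.json")
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	content, _ := file.Body.Content(&hcl.BodySchema{
+		Attributes: []hcl.AttributeSchema{{Name: "net"}},
+	})
+
+	// Each property of the JSON object becomes an attribute of the
+	// constructed object type, mapped recursively by these same rules.
+	val, _ := content.Attributes["net"].Expr.Value(nil)
+	fmt.Println(val.Type().IsObjectType())
+	fmt.Println(val.GetAttr("cidr").AsString())
+}
+```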
+
+It is an error to define the same property name multiple times within a single
+JSON object interpreted as an expression.
+
+### Arrays
+
+When interpreted as an expression, a JSON array represents a value of a HCL
+tuple type.
+
+Each element of the JSON array represents an element of the HCL tuple type.
+The tuple type is constructed by enumerating the JSON array elements, creating
+for each an element whose type is the result of recursively applying the
+expression mapping rules. Correspondence is preserved between the array element
+indices and the tuple element indices.
+
+An instance of the constructed tuple type is then created, whose values are
+interpreted by again recursively applying the mapping rules defined in this
+section.
+
+### Numbers
+
+When interpreted as an expression, a JSON number represents a HCL number value.
+
+HCL numbers are arbitrary-precision decimal values, so an ideal implementation
+of this specification will translate exactly the value given to a number of
+corresponding precision.
+
+In practice, off-the-shelf JSON parsers often do not support customizing the
+processing of numbers, and instead force processing as 32-bit or 64-bit
+floating point values with a potential loss of precision. It is permissible
+for a HCL JSON parser to pass on such limitations _if and only if_ the
+available precision and other constraints are defined in its documentation.
+Calling applications each have differing precision requirements, so calling
+applications are free to select an implementation with more limited precision
+capabilities should high precision not be required for that application.
+
+### Boolean Values
+
+The JSON boolean values `true` and `false`, when interpreted as expressions,
+represent the corresponding HCL boolean values.
+
+### The Null Value
+
+The JSON value `null`, when interpreted as an expression, represents a
+HCL null value of the dynamic pseudo-type.
+
+### Strings
+
+When interpreted as an expression, a JSON string may be interpreted in one of
+two ways depending on the evaluation mode.
+
+If evaluating in literal-only mode (as defined by the syntax-agnostic
+information model) the literal string is interpreted directly as a HCL string
+value, by directly using the exact sequence of unicode characters represented.
+Template interpolations and directives MUST NOT be processed in this mode,
+allowing any characters that appear as introduction sequences to pass through
+literally:
+
+```json
+"Hello world! Template sequences like ${ are not interpreted here."
+```
+
+When evaluating in full expression mode (again, as defined by the
+syntax-agnostic information model) the literal string is instead interpreted
+as a _standalone template_ in the HCL Native Syntax. The expression evaluation
+result is then the direct result of evaluating that template with the current
+variable scope and function table.
+
+```json
+"Hello, ${name}! Template sequences are interpreted in full expression mode."
+``` + +In particular the _Template Interpolation Unwrapping_ requirement from the +HCL native syntax specification must be implemented, allowing the use of +single-interpolation templates to represent expressions that would not +otherwise be representable in JSON, such as the following example where +the result must be a number, rather than a string representation of a number: + +```json +"${ a + b }" +``` diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go new file mode 100644 index 00000000000..d13607eed0e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go @@ -0,0 +1,355 @@ +package json + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// body is the implementation of "Body" used for files processed with the JSON +// parser. +type body struct { + obj *objectVal + + // If non-nil, the keys of this map cause the corresponding attributes to + // be treated as non-existing. This is used when Body.PartialContent is + // called, to produce the "remaining content" Body. + hiddenAttrs map[string]struct{} + + // If set, string values are turned into expressions using HIL's template + // language, rather than the native zcl language. This is intended to + // allow applications moving from HCL to zcl to continue to parse the + // JSON variant of their config that HCL handled previously. + useHIL bool +} + +// expression is the implementation of "Expression" used for files processed +// with the JSON parser. +type expression struct { + src node +} + +func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, newBody, diags := b.PartialContent(schema) + + hiddenAttrs := newBody.(*body).hiddenAttrs + + var nameSuggestions []string + for _, attrS := range schema.Attributes { + if _, ok := hiddenAttrs[attrS.Name]; !ok { + // Only suggest an attribute name if we didn't use it already. + nameSuggestions = append(nameSuggestions, attrS.Name) + } + } + for _, blockS := range schema.Blocks { + // Blocks can appear multiple times, so we'll suggest their type + // names regardless of whether they've already been used. + nameSuggestions = append(nameSuggestions, blockS.Type) + } + + for k, attr := range b.obj.Attrs { + if k == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. 
+ continue + } + + if _, ok := hiddenAttrs[k]; !ok { + var fixItHint string + suggestion := nameSuggestion(k, nameSuggestions) + if suggestion != "" { + fixItHint = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous JSON object property", + Detail: fmt.Sprintf("No attribute or block type is named %q.%s", k, fixItHint), + Subject: &attr.NameRange, + Context: attr.Range().Ptr(), + }) + } + } + + return content, diags +} + +func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + + obj := b.obj + jsonAttrs := obj.Attrs + usedNames := map[string]struct{}{} + if b.hiddenAttrs != nil { + for k := range b.hiddenAttrs { + usedNames[k] = struct{}{} + } + } + var diags hcl.Diagnostics + + content := &hcl.BodyContent{ + Attributes: map[string]*hcl.Attribute{}, + Blocks: nil, + + MissingItemRange: b.MissingItemRange(), + } + + for _, attrS := range schema.Attributes { + jsonAttr, exists := jsonAttrs[attrS.Name] + _, used := usedNames[attrS.Name] + if used || !exists { + if attrS.Required { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, so a JSON object property must be present with this name.", attrS.Name), + Subject: &obj.OpenRange, + }) + } + continue + } + content.Attributes[attrS.Name] = &hcl.Attribute{ + Name: attrS.Name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + usedNames[attrS.Name] = struct{}{} + } + + for _, blockS := range schema.Blocks { + jsonAttr, exists := jsonAttrs[blockS.Type] + _, used := usedNames[blockS.Type] + if used || !exists { + usedNames[blockS.Type] = struct{}{} + continue + } + v := jsonAttr.Value + diags = append(diags, b.unpackBlock(v, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks)...) + usedNames[blockS.Type] = struct{}{} + } + + unusedBody := &body{ + obj: b.obj, + hiddenAttrs: usedNames, + useHIL: b.useHIL, + } + + return content, unusedBody, diags +} + +// JustAttributes for JSON bodies interprets all properties of the wrapped +// JSON object as attributes and returns them. +func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs := make(map[string]*hcl.Attribute) + for name, jsonAttr := range b.obj.Attrs { + if name == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = &hcl.Attribute{ + Name: name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + } + + // No diagnostics possible here, since the parser already took care of + // finding duplicates and every JSON value can be a valid attribute value. 
+ return attrs, nil +} + +func (b *body) MissingItemRange() hcl.Range { + return b.obj.CloseRange +} + +func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { + if len(labelsLeft) > 0 { + labelName := labelsLeft[0] + ov, ok := v.(*objectVal) + if !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("A JSON object is required, whose keys represent the %s block's %s.", typeName, labelName), + Subject: v.StartRange().Ptr(), + }) + return + } + if len(ov.Attrs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing block label", + Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), + Subject: v.StartRange().Ptr(), + }) + return + } + labelsUsed := append(labelsUsed, "") + labelRanges := append(labelRanges, hcl.Range{}) + for pk, p := range ov.Attrs { + labelsUsed[len(labelsUsed)-1] = pk + labelRanges[len(labelRanges)-1] = p.NameRange + diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) + } + return + } + + // By the time we get here, we've peeled off all the labels and we're ready + // to deal with the block's actual content. + + // need to copy the label slices because their underlying arrays will + // continue to be mutated after we return. + labels := make([]string, len(labelsUsed)) + copy(labels, labelsUsed) + labelR := make([]hcl.Range, len(labelRanges)) + copy(labelR, labelRanges) + + switch tv := v.(type) { + case *objectVal: + // Single instance of the block + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + obj: tv, + useHIL: b.useHIL, + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + case *arrayVal: + // Multiple instances of the block + for _, av := range tv.Values { + ov, ok := av.(*objectVal) + if !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("A JSON object is required, representing the contents of a %q block.", typeName), + Subject: v.StartRange().Ptr(), + }) + continue + } + + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + obj: ov, + useHIL: b.useHIL, + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), + Subject: v.StartRange().Ptr(), + }) + } + return +} + +func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + switch v := e.src.(type) { + case *stringVal: + if ctx != nil { + // Parse string contents as a zcl native language expression. + // We only do this if we have a context, so passing a nil context + // is how the caller specifies that interpolations are not allowed + // and that the string should just be returned verbatim. 
+ templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the zclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, evalDiags := expr.Value(ctx) + diags = append(diags, evalDiags...) + return val, diags + } + + // FIXME: Once the native zcl template language parser is implemented, + // parse string values as templates and evaluate them. + return cty.StringVal(v.Value), nil + case *numberVal: + return cty.NumberVal(v.Value), nil + case *booleanVal: + return cty.BoolVal(v.Value), nil + case *arrayVal: + vals := []cty.Value{} + for _, jsonVal := range v.Values { + val, _ := (&expression{src: jsonVal}).Value(ctx) + vals = append(vals, val) + } + return cty.TupleVal(vals), nil + case *objectVal: + attrs := map[string]cty.Value{} + for name, jsonAttr := range v.Attrs { + val, _ := (&expression{src: jsonAttr.Value}).Value(ctx) + attrs[name] = val + } + return cty.ObjectVal(attrs), nil + default: + // Default to DynamicVal so that ASTs containing invalid nodes can + // still be partially-evaluated. + return cty.DynamicVal, nil + } +} + +func (e *expression) Variables() []hcl.Traversal { + var vars []hcl.Traversal + + switch v := e.src.(type) { + case *stringVal: + // FIXME: Once the native zcl template language parser is implemented, + // parse with that and look for variables in there too, + + case *arrayVal: + for _, jsonVal := range v.Values { + vars = append(vars, (&expression{src: jsonVal}).Variables()...) + } + case *objectVal: + for _, jsonAttr := range v.Attrs { + vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) + } + } + + return vars +} + +func (e *expression) Range() hcl.Range { + return e.src.Range() +} + +func (e *expression) StartRange() hcl.Range { + return e.src.StartRange() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go new file mode 100644 index 00000000000..8773235fe22 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. 
+ +package json + +import "fmt" + +const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" + +var _tokenType_map = map[tokenType]string{ + 0: _tokenType_name[0:12], + 44: _tokenType_name[12:22], + 58: _tokenType_name[22:32], + 61: _tokenType_name[32:43], + 75: _tokenType_name[43:55], + 78: _tokenType_name[55:66], + 83: _tokenType_name[66:77], + 91: _tokenType_name[77:88], + 93: _tokenType_name[88:99], + 123: _tokenType_name[99:110], + 125: _tokenType_name[110:121], + 9220: _tokenType_name[121:129], +} + +func (i tokenType) String() string { + if str, ok := _tokenType_map[i]; ok { + return str + } + return fmt.Sprintf("tokenType(%d)", i) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/vendor/github.com/hashicorp/hcl2/hcl/merged.go new file mode 100644 index 00000000000..ca2b728af34 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/merged.go @@ -0,0 +1,226 @@ +package hcl + +import ( + "fmt" +) + +// MergeFiles combines the given files to produce a single body that contains +// configuration from all of the given files. +// +// The ordering of the given files decides the order in which contained +// elements will be returned. If any top-level attributes are defined with +// the same name across multiple files, a diagnostic will be produced from +// the Content and PartialContent methods describing this error in a +// user-friendly way. +func MergeFiles(files []*File) Body { + var bodies []Body + for _, file := range files { + bodies = append(bodies, file.Body) + } + return MergeBodies(bodies) +} + +// MergeBodies is like MergeFiles except it deals directly with bodies, rather +// than with entire files. +func MergeBodies(bodies []Body) Body { + if len(bodies) == 0 { + // Swap out for our singleton empty body, to reduce the number of + // empty slices we have hanging around. + return emptyBody + } + + // If any of the given bodies are already merged bodies, we'll unpack + // to flatten to a single mergedBodies, since that's conceptually simpler. + // This also, as a side-effect, eliminates any empty bodies, since + // empties are merged bodies with no inner bodies. + var newLen int + var flatten bool + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + newLen += len(children) + flatten = true + } else { + newLen++ + } + } + + if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside + return mergedBodies(bodies) + } + + if newLen == 0 { + // Don't allocate a new empty when we already have one + return emptyBody + } + + new := make([]Body, 0, newLen) + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + new = append(new, children...) + } else { + new = append(new, body) + } + } + return mergedBodies(new) +} + +var emptyBody = mergedBodies([]Body{}) + +// EmptyBody returns a body with no content. This body can be used as a +// placeholder when a body is required but no body content is available. +func EmptyBody() Body { + return emptyBody +} + +type mergedBodies []Body + +// Content returns the content produced by applying the given schema to all +// of the merged bodies and merging the result. +// +// Although required attributes _are_ supported, they should be used sparingly +// with merged bodies since in this case there is no contextual information +// with which to return good diagnostics. 
Applications working with merged
+// bodies may wish to mark all attributes as optional and then check for
+// required attributes afterwards, to produce better diagnostics.
+func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) {
+ // the returned body will always be empty in this case, because mergedContent
+ // will only ever call Content on the child bodies.
+ content, _, diags := mb.mergedContent(schema, false)
+ return content, diags
+}
+
+func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) {
+ return mb.mergedContent(schema, true)
+}
+
+func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) {
+ attrs := make(map[string]*Attribute)
+ var diags Diagnostics
+
+ for _, body := range mb {
+ thisAttrs, thisDiags := body.JustAttributes()
+
+ if len(thisDiags) != 0 {
+ diags = append(diags, thisDiags...)
+ }
+
+ if thisAttrs != nil {
+ for name, attr := range thisAttrs {
+ if existing := attrs[name]; existing != nil {
+ diags = diags.Append(&Diagnostic{
+ Severity: DiagError,
+ Summary: "Duplicate attribute",
+ Detail: fmt.Sprintf(
+ "Attribute %q was already assigned at %s",
+ name, existing.NameRange.String(),
+ ),
+ Subject: &attr.NameRange,
+ })
+ continue
+ }
+
+ attrs[name] = attr
+ }
+ }
+ }
+
+ return attrs, diags
+}
+
+func (mb mergedBodies) MissingItemRange() Range {
+ if len(mb) == 0 {
+ // Nothing useful to return here, so we'll return some garbage.
+ return Range{
+ Filename: "",
+ }
+ }
+
+ // arbitrarily use the first body's missing item range
+ return mb[0].MissingItemRange()
+}
+
+func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) {
+ // We need to produce a new schema with none of the attributes marked as
+ // required, since _any one_ of our bodies can contribute an attribute value.
+ // We'll separately check that all required attributes are present at
+ // the end.
+ mergedSchema := &BodySchema{
+ Blocks: schema.Blocks,
+ }
+ for _, attrS := range schema.Attributes {
+ mergedAttrS := attrS
+ mergedAttrS.Required = false
+ mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
+ }
+
+ var mergedLeftovers []Body
+ content := &BodyContent{
+ Attributes: map[string]*Attribute{},
+ }
+
+ var diags Diagnostics
+ for _, body := range mb {
+ var thisContent *BodyContent
+ var thisLeftovers Body
+ var thisDiags Diagnostics
+
+ if partial {
+ thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
+ } else {
+ thisContent, thisDiags = body.Content(mergedSchema)
+ }
+
+ if thisLeftovers != nil {
+ mergedLeftovers = append(mergedLeftovers, thisLeftovers)
+ }
+ if len(thisDiags) != 0 {
+ diags = append(diags, thisDiags...)
+ }
+
+ if thisContent.Attributes != nil {
+ for name, attr := range thisContent.Attributes {
+ if existing := content.Attributes[name]; existing != nil {
+ diags = diags.Append(&Diagnostic{
+ Severity: DiagError,
+ Summary: "Duplicate attribute",
+ Detail: fmt.Sprintf(
+ "Attribute %q was already assigned at %s",
+ name, existing.NameRange.String(),
+ ),
+ Subject: &attr.NameRange,
+ })
+ continue
+ }
+ content.Attributes[name] = attr
+ }
+ }
+
+ if len(thisContent.Blocks) != 0 {
+ content.Blocks = append(content.Blocks, thisContent.Blocks...)
+ }
+ }
+
+ // Finally, we check for required attributes.
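+ // Note that we check against the original schema, not mergedSchema:
+ // the Required flags were cleared above so that any one body may supply
+ // each attribute, so requiredness can only be enforced here, across the
+ // merged result.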
+ for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + + if content.Attributes[attrS.Name] == nil { + // We don't have any context here to produce a good diagnostic, + // which is why we warn in the Content docstring to minimize the + // use of required attributes on merged bodies. + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf( + "The attribute %q is required, but was not assigned.", + attrS.Name, + ), + }) + } + } + + leftoverBody := MergeBodies(mergedLeftovers) + return content, leftoverBody, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/vendor/github.com/hashicorp/hcl2/hcl/ops.go new file mode 100644 index 00000000000..80312b010df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/ops.go @@ -0,0 +1,147 @@ +package hcl + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Index is a helper function that performs the same operation as the index +// operator in the zcl expression language. That is, the result is the +// same as it would be for collection[key] in a configuration expression. +// +// This is exported so that applications can perform indexing in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. +func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + if collection.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to index null value", + Detail: "This value is null, so it does not have any indices.", + Subject: srcRange, + }, + } + } + if key.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "Can't use a null value as an indexing key.", + Subject: srcRange, + }, + } + } + ty := collection.Type() + kty := key.Type() + if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + switch { + + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + var wantType cty.Type + switch { + case ty.IsListType() || ty.IsTupleType(): + wantType = cty.Number + case ty.IsMapType(): + wantType = cty.String + default: + // should never happen + panic("don't know what key type we want") + } + + key, keyErr := convert.Convert(key, wantType) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + + has := collection.HasIndex(key) + if !has.IsKnown() { + if ty.IsTupleType() { + return cty.DynamicVal, nil + } else { + return cty.UnknownVal(ty.ElementType()), nil + } + } + if has.False() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.Index(key), nil + + case ty.IsObjectType(): + key, keyErr := convert.Convert(key, cty.String) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + 
Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + if !collection.IsKnown() { + return cty.DynamicVal, nil + } + if !key.IsKnown() { + return cty.DynamicVal, nil + } + + attrName := key.AsString() + + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.GetAttr(attrName), nil + + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "This value does not have any indices.", + Subject: srcRange, + }, + } + } + +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go new file mode 100644 index 00000000000..3ccdfacb876 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go @@ -0,0 +1,96 @@ +package hcl + +import "fmt" + +// Pos represents a single position in a source file, by addressing the +// start byte of a unicode character encoded in UTF-8. +// +// Pos is generally used only in the context of a Range, which then defines +// which source file the position is within. +type Pos struct { + // Line is the source code line where this position points. Lines are + // counted starting at 1 and incremented for each newline character + // encountered. + Line int + + // Column is the source code column where this position points, in + // unicode characters, with counting starting at 1. + // + // Column counts characters as they appear visually, so for example a + // latin letter with a combining diacritic mark counts as one character. + // This is intended for rendering visual markers against source code in + // contexts where these diacritics would be rendered in a single character + // cell. Technically speaking, Column is counting grapheme clusters as + // used in unicode normalization. + Column int + + // Byte is the byte offset into the file where the indicated character + // begins. This is a zero-based offset to the first byte of the first + // UTF-8 codepoint sequence in the character, and thus gives a position + // that can be resolved _without_ awareness of Unicode characters. + Byte int +} + +// Range represents a span of characters between two positions in a source +// file. +// +// This struct is usually used by value in types that represent AST nodes, +// but by pointer in types that refer to the positions of other objects, +// such as in diagnostics. +type Range struct { + // Filename is the name of the file into which this range's positions + // point. + Filename string + + // Start and End represent the bounds of this range. Start is inclusive + // and End is exclusive. + Start, End Pos +} + +// RangeBetween returns a new range that spans from the beginning of the +// start range to the end of the end range. +// +// The result is meaningless if the two ranges do not belong to the same +// source file or if the end range appears before the start range. +func RangeBetween(start, end Range) Range { + return Range{ + Filename: start.Filename, + Start: start.Start, + End: end.End, + } +} + +// ContainsOffset returns true if and only if the given byte offset is within +// the receiving Range. 
+func (r Range) ContainsOffset(offset int) bool {
+ return offset >= r.Start.Byte && offset < r.End.Byte
+}
+
+// Ptr returns a pointer to a copy of the receiver. This is a convenience
+// for using ranges in places where pointers are required, such as in
+// Diagnostic, when the range in question is returned from a method. Go would
+// otherwise not allow one to take the address of a function call.
+func (r Range) Ptr() *Range {
+ return &r
+}
+
+// String returns a compact string representation of the receiver.
+// Callers should generally prefer to present a range more visually,
+// e.g. via markers directly on the relevant portion of source code.
+func (r Range) String() string {
+ if r.Start.Line == r.End.Line {
+ return fmt.Sprintf(
+ "%s:%d,%d-%d",
+ r.Filename,
+ r.Start.Line, r.Start.Column,
+ r.End.Column,
+ )
+ } else {
+ return fmt.Sprintf(
+ "%s:%d,%d-%d,%d",
+ r.Filename,
+ r.Start.Line, r.Start.Column,
+ r.End.Line, r.End.Column,
+ )
+ }
+} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/schema.go b/vendor/github.com/hashicorp/hcl2/hcl/schema.go new file mode 100644 index 00000000000..891257acb20 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/schema.go @@ -0,0 +1,21 @@ +package hcl
+
+// BlockHeaderSchema represents the shape of a block header, and is
+// used for matching blocks within bodies.
+type BlockHeaderSchema struct {
+ Type string
+ LabelNames []string
+}
+
+// AttributeSchema represents the requirements for an attribute, and is used
+// for matching attributes within bodies.
+type AttributeSchema struct {
+ Name string
+ Required bool
+}
+
+// BodySchema represents the desired shallow structure of a body.
+type BodySchema struct {
+ Attributes []AttributeSchema
+ Blocks []BlockHeaderSchema
+} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md new file mode 100644 index 00000000000..db4e9ef979c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md @@ -0,0 +1,646 @@ +# HCL Syntax-Agnostic Information Model
+
+This is the specification for the general information model (abstract types and
+semantics) for HCL. HCL is a system for defining configuration languages for
+applications. The HCL information model is designed to support multiple
+concrete syntaxes for configuration, each with a mapping to the model defined
+in this specification.
+
+The two primary syntaxes intended for use in conjunction with this model are
+[the HCL native syntax](./zclsyntax/spec.md) and [the JSON syntax](./json/spec.md).
+In principle other syntaxes are possible as long as either their language model
+is sufficiently rich to express the concepts described in this specification
+or the language targets a well-defined subset of the specification.
+
+## Structural Elements
+
+The primary structural element is the _body_, which is a container representing
+a set of zero or more _attributes_ and a set of zero or more _blocks_.
+
+A _configuration file_ is the top-level object, and will usually be produced
+by reading a file from disk and parsing it as a particular syntax. A
+configuration file has its own _body_, representing the top-level attributes
+and blocks.
+
+An _attribute_ is a name and value pair associated with a body. Attribute names
+are unique within a given body. Attribute values are provided as _expressions_,
+which are discussed in detail in a later section.
+
+A _block_ is a nested structure that has a _type name_, zero or more string
+_labels_ (e.g. identifiers), and a nested body.
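+
+These elements map directly onto the Go API vendored in this change: a
+`hcl.Body` is interrogated with a schema (see "Body Content" below) to yield
+its attributes and blocks. The following is a minimal, non-normative sketch;
+the `region` attribute and `service` block type are hypothetical names, and
+`body` is assumed to come from a parsed configuration file:
+
+```go
+package main
+
+import (
+  "fmt"
+
+  "github.com/hashicorp/hcl2/hcl"
+)
+
+// describeBody reads one level of structure: a single named attribute plus
+// zero or more "service" blocks, each carrying one label.
+func describeBody(body hcl.Body) hcl.Diagnostics {
+  schema := &hcl.BodySchema{
+    Attributes: []hcl.AttributeSchema{
+      {Name: "region", Required: true}, // hypothetical attribute name
+    },
+    Blocks: []hcl.BlockHeaderSchema{
+      {Type: "service", LabelNames: []string{"name"}}, // hypothetical block type
+    },
+  }
+  content, diags := body.Content(schema)
+  for name := range content.Attributes {
+    fmt.Printf("attribute %q\n", name)
+  }
+  for _, block := range content.Blocks {
+    fmt.Printf("block %q with labels %v\n", block.Type, block.Labels)
+  }
+  return diags
+}
+```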
+
+Together the structural elements create a hierarchical data structure, with
+attributes intended to represent the direct properties of a particular object
+in the calling application, and blocks intended to represent child objects
+of a particular object.
+
+## Body Content
+
+To support the expression of the HCL concepts in languages whose information
+model is a subset of HCL's, such as JSON, a _body_ is an opaque container
+whose content can only be accessed by providing information on the expected
+structure of the content.
+
+The specification for each syntax must describe how its physical constructs
+are mapped onto body content given a schema. For syntaxes that have
+first-class syntax distinguishing attributes and bodies this can be relatively
+straightforward, while more detailed mapping rules may be required in syntaxes
+where the representation of attributes vs. blocks is ambiguous.
+
+### Schema-driven Processing
+
+Schema-driven processing is the primary way to access body content.
+A _body schema_ is a description of what is expected within a particular body,
+which can then be used to extract the _body content_, which then provides
+access to the specific attributes and blocks requested.
+
+A _body schema_ consists of a list of _attribute schemata_ and
+_block header schemata_:
+
+* An _attribute schema_ provides the name of an attribute and whether its
+  presence is required.
+
+* A _block header schema_ provides a block type name and the semantic names
+  assigned to each of the labels of that block type, if any.
+
+Within a schema, it is an error to request the same attribute name twice or
+to request a block type whose name is also an attribute name. While this can
+in principle be supported in some syntaxes, in other syntaxes the attribute
+and block namespaces are combined and so an attribute cannot coexist with
+a block whose type name is identical to the attribute name.
+
+The result of applying a body schema to a body is _body content_, which
+consists of an _attribute map_ and a _block sequence_:
+
+* The _attribute map_ is a map data structure whose keys are attribute names
+  and whose values are _expressions_ that represent the corresponding attribute
+  values.
+
+* The _block sequence_ is an ordered sequence of blocks, with each specifying
+  a block _type name_, the sequence of _labels_ specified for the block,
+  and the body object (not body _content_) representing the block's own body.
+
+After obtaining _body content_, the calling application may continue processing
+by evaluating attribute expressions and/or recursively applying further
+schema-driven processing to the child block bodies.
+
+**Note:** The _body schema_ is intentionally minimal, to reduce the set of
+mapping rules that must be defined for each syntax. Higher-level utility
+libraries may be provided to assist in the construction of a schema and
+perform additional processing, such as automatically evaluating attribute
+expressions and assigning their result values into a data structure, or
+recursively applying a schema to child blocks. Such utilities are not part of
+this core specification and will vary depending on the capabilities and idiom
+of the implementation language.
+
+### _Dynamic Attributes_ Processing
+
+The _schema-driven_ processing model is useful when the expected structure
+of a body is known a priori by the calling application. Some blocks are
+instead more free-form, such as a user-provided set of arbitrary key/value
+pairs.
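+
+In the Go API vendored here, this free-form reading is exposed as the
+`JustAttributes` method, which implements the processing mode described
+next. A minimal, non-normative sketch (imports as in the previous example):
+
+```go
+// listPairs treats the whole body as arbitrary key/value pairs. A real
+// caller should surface the diagnostics instead of discarding them.
+func listPairs(body hcl.Body) map[string]hcl.Expression {
+  attrs, _ := body.JustAttributes()
+  exprs := make(map[string]hcl.Expression, len(attrs))
+  for name, attr := range attrs {
+    exprs[name] = attr.Expr
+  }
+  return exprs
+}
+```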
+ +The alternative _dynamic attributes_ processing mode allows for this more +ad-hoc approach. Processing in this mode behaves as if a schema had been +constructed without any _block header schemata_ and with an attribute +schema for each distinct key provided within the physical representation +of the body. + +The means by which _distinct keys_ are identified is dependent on the +physical syntax; this processing mode assumes that the syntax has a way +to enumerate keys provided by the author and identify expressions that +correspond with those keys, but does not define the means by which this is +done. + +The result of _dynamic attributes_ processing is an _attribute map_ as +defined in the previous section. No _block sequence_ is produced in this +processing mode. + +### Partial Processing of Body Content + +Under _schema-driven processing_, by default the given schema is assumed +to be exhaustive, such that any attribute or block not matched by schema +elements is considered an error. This allows feedback about unsupported +attributes and blocks (such as typos) to be provided. + +An alternative is _partial processing_, where any additional elements within +the body are not considered an error. + +Under partial processing, the result is both body content as described +above _and_ a new body that represents any body elements that remain after +the schema has been processed. + +Specifically: + +* Any attribute whose name is specified in the schema is returned in body + content and elided from the new body. + +* Any block whose type is specified in the schema is returned in body content + and elided from the new body. + +* Any attribute or block _not_ meeting the above conditions is placed into + the new body, unmodified. + +The new body can then be recursively processed using any of the body +processing models. This facility allows different subsets of body content +to be processed by different parts of the calling application. + +Processing a body in two steps — first partial processing of a source body, +then exhaustive processing of the returned body — is equivalent to single-step +processing with a schema that is the union of the schemata used +across the two steps. + +## Expressions + +Attribute values are represented by _expressions_. Depending on the concrete +syntax in use, an expression may just be a literal value or it may describe +a computation in terms of literal values, variables, and functions. + +Each syntax defines its own representation of expressions. For syntaxes based +in languages that do not have any non-literal expression syntax, it is +recommended to embed the template language from +[the native syntax](./zclsyntax/spec.md) e.g. as a post-processing step on +string literals. + +### Expression Evaluation + +In order to obtain a concrete value, each expression must be _evaluated_. +Evaluation is performed in terms of an evaluation context, which +consists of the following: + +* An _evaluation mode_, which is defined below. +* A _variable scope_, which provides a set of named variables for use in + expressions. +* A _function table_, which provides a set of named functions for use in + expressions. + +The _evaluation mode_ allows for two different interpretations of an +expression: + +* In _literal-only mode_, variables and functions are not available and it + is assumed that the calling application's intent is to treat the attribute + value as a literal. 
+
+* In _full expression mode_, variables and functions are defined and it is
+  assumed that the calling application wishes to provide a full expression
+  language for definition of the attribute value.
+
+The actual behavior of these two modes depends on the syntax in use. For
+languages with first-class expression syntax, these two modes may be considered
+equivalent, with _literal-only mode_ simply not defining any variables or
+functions. For languages that embed arbitrary expressions via string templates,
+_literal-only mode_ may disable such processing, allowing literal strings to
+pass through without interpretation as templates.
+
+Since literal-only mode does not support variables and functions, it is an
+error for the calling application to enable this mode and yet provide a
+variable scope and/or function table.
+
+## Values and Value Types
+
+The result of expression evaluation is a _value_. Each value has a _type_,
+which is dynamically determined during evaluation. The _variable scope_ in
+the evaluation context is a map from variable name to value, using the same
+definition of value.
+
+The type system for HCL values is intended to be at a level of abstraction
+suitable for configuration of various applications. A well-defined,
+implementation-language-agnostic type system is defined to allow for
+consistent processing of configuration across many implementation languages.
+Concrete implementations may provide additional functionality to lower
+HCL values and types to corresponding native language types, which may then
+impose additional constraints on the values outside of the scope of this
+specification.
+
+Two values are _equal_ if and only if they have identical types and their
+values are equal according to the rules of their shared type.
+
+### Primitive Types
+
+The primitive types are _string_, _bool_, and _number_.
+
+A _string_ is a sequence of unicode characters. Two strings are equal if
+NFC normalization ([UAX#15](http://unicode.org/reports/tr15/))
+of each string produces two identical sequences of characters.
+NFC normalization ensures that, for example, a precomposed combination of a
+latin letter and a diacritic compares equal with the letter followed by
+a combining diacritic.
+
+The _bool_ type has only two non-null values: _true_ and _false_. Two bool
+values are equal if and only if they are either both true or both false.
+
+A _number_ is an arbitrary-precision floating point value. An implementation
+_must_ make the full-precision values available to the calling application
+for interpretation into any suitable number representation. An implementation
+may in practice implement numbers with limited precision so long as the
+following constraints are met:
+
+* Integers are represented with at least 256 bits.
+* Non-integer numbers are represented as floating point values with a
+  mantissa of at least 256 bits and a signed binary exponent of at least
+  16 bits.
+* An error is produced if an integer value given in source cannot be
+  represented precisely.
+* An error is produced if a non-integer value cannot be represented due to
+  overflow.
+* A non-integer number is rounded to the nearest possible value when a
+  value is of too high a precision to be represented.
+
+The _number_ type also requires representation of both positive and negative
+infinity. A "not a number" (NaN) value is _not_ provided nor used.
+
+Two number values are equal if they are numerically equal to the precision
+associated with the number.
Positive infinity and negative infinity are
+equal to themselves but not to each other. Positive infinity is greater than
+any other number value, and negative infinity is less than any other number
+value.
+
+Some syntaxes may be unable to represent numeric literals of arbitrary
+precision. This must be defined in the syntax specification as part of its
+description of mapping numeric literals to HCL values.
+
+### Structural Types
+
+_Structural types_ are types that are constructed by combining other types.
+Each distinct combination of other types is itself a distinct type. There
+are two structural type _kinds_:
+
+* _Object types_ are constructed of a set of named attributes, each of which
+  has a type. Attribute names are always strings. (_Object_ attributes are a
+  distinct idea from _body_ attributes, though calling applications
+  may choose to blur the distinction by use of common naming schemes.)
+* _Tuple types_ are constructed of a sequence of elements, each of which
+  has a type.
+
+Values of structural types are compared for equality in terms of their
+attributes or elements. A structural type value is equal to another if and
+only if all of the corresponding attributes or elements are equal.
+
+Two structural types are identical if they are of the same kind and
+have attributes or elements with identical types.
+
+### Collection Types
+
+_Collection types_ are types that combine together an arbitrary number of
+values of some other single type. There are three collection type _kinds_:
+
+* _List types_ represent ordered sequences of values of their element type.
+* _Map types_ represent values of their element type accessed via string keys.
+* _Set types_ represent unordered sets of distinct values of their element type.
+
+For each of these kinds and each distinct element type there is a distinct
+collection type. For example, "list of string" is a distinct type from
+"set of string", and "list of number" is a distinct type from "list of string".
+
+Values of collection types are compared for equality in terms of their
+elements. A collection type value is equal to another if and only if both
+have the same number of elements and their corresponding elements are equal.
+
+Two collection types are identical if they are of the same kind and have
+the same element type.
+
+### Null Values
+
+Each type has a null value. The null value of a type represents the absence
+of a value, but with type information retained to allow for type checking.
+
+Null values are used primarily to represent the conditional absence of a
+body attribute. In a syntax with a conditional operator, one of the result
+values of that conditional may be null to indicate that the attribute should be
+considered not present in that case.
+
+Calling applications _should_ consider an attribute with a null value as
+equivalent to the value not being present at all.
+
+A null value of a particular type is equal to itself.
+
+### Unknown Values and the Dynamic Pseudo-type
+
+An _unknown value_ is a placeholder for a value that is not yet known.
+Operations on unknown values themselves return unknown values that have a
+type appropriate to the operation. For example, adding together two unknown
+numbers yields an unknown number, while comparing two unknown values of any
+type for equality yields an unknown bool.
+
+Each type has a distinct unknown value. For example, an unknown _number_ is
+a distinct value from an unknown _string_.
+
+_The dynamic pseudo-type_ is a placeholder for a type that is not yet known.
+
+The only values of this type are its null value and its unknown value. It is
+referred to as a _pseudo-type_ because it should not be considered a type in
+its own right, but rather as a placeholder for a type yet to be established.
+The unknown value of the dynamic pseudo-type is referred to as _the dynamic
+value_.
+
+Operations on values of the dynamic pseudo-type behave as if it is a value
+of the expected type, optimistically assuming that once the value and type
+are known they will be valid for the operation. For example, adding together
+a number and the dynamic value produces an unknown number.
+
+Unknown values and the dynamic pseudo-type can be used as a mechanism for
+partial type checking and semantic checking: by evaluating an expression with
+all variables set to an unknown value, the expression can be evaluated to
+produce an unknown value of a given type, or produce an error if any operation
+is provably invalid with only type information.
+
+Unknown values and the dynamic pseudo-type must never be returned from
+operations unless at least one operand is unknown or dynamic. Calling
+applications are guaranteed that unless the global scope includes unknown
+values, or the function table includes functions that return unknown values,
+no expression will evaluate to an unknown value. The calling application is
+thus in total control over the use and meaning of unknown values.
+
+The dynamic pseudo-type is identical only to itself.
+
+### Capsule Types
+
+A _capsule type_ is a custom type defined by the calling application. A value
+of a capsule type is considered opaque to HCL, but may be accepted
+by functions provided by the calling application.
+
+A particular capsule type is identical only to itself. The equality of two
+values of the same capsule type is defined by the calling application. No
+other operations are supported for values of capsule types.
+
+Support for capsule types in an HCL implementation is optional. Capsule types
+are intended to allow calling applications to pass through values that are
+not part of the standard type system. For example, an application that
+deals with raw binary data may define a capsule type representing a byte
+array, and provide functions that produce or operate on byte arrays.
+
+### Type Specifications
+
+In certain situations it is necessary to state expectations about the
+type of a value. Whereas two _types_ have a commutative _identity_ relationship,
+a type has a non-commutative _matches_ relationship with a _type specification_.
+A type specification is, in practice, just a different interpretation of a
+type such that:
+
+* Any type _matches_ any type that it is identical to.
+
+* Any type _matches_ the dynamic pseudo-type.
+
+For example, given a type specification "list of dynamic pseudo-type", the
+concrete types "list of string" and "list of map" match, but the
+type "set of string" does not.
+
+## Functions and Function Calls
+
+The evaluation context used to evaluate an expression includes a function
+table, which represents an application-defined set of named functions
+available for use in expressions.
+
+Each syntax defines whether function calls are supported and how they are
+physically represented in source code, but the semantics of function calls are
+defined here to ensure consistent results across syntaxes and to allow
+applications to provide functions that are interoperable with all syntaxes.
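+
+The function model enumerated below corresponds closely to the `function`
+package of go-cty, which the Go implementation of HCL uses for its function
+tables. As a non-normative sketch, a hypothetical single-parameter "upper"
+function might be defined like this:
+
+```go
+package main
+
+import (
+  "strings"
+
+  "github.com/zclconf/go-cty/cty"
+  "github.com/zclconf/go-cty/cty/function"
+)
+
+// upperFunc has one positional string parameter, a static result type
+// definition, and a result value definition given by Impl. Null, unknown
+// and dynamic arguments are rejected unless the corresponding per-parameter
+// flags are set, mirroring the parameter flags described below.
+var upperFunc = function.New(&function.Spec{
+  Params: []function.Parameter{
+    {Name: "str", Type: cty.String},
+  },
+  Type: function.StaticReturnType(cty.String),
+  Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+    return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
+  },
+})
+```
+
+A call such as `upperFunc.Call([]cty.Value{cty.StringVal("hi")})` then
+proceeds through the argument mapping and checking rules that follow.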
+
+A _function_ is defined from the following elements:
+
+* Zero or more _positional parameters_, each with a name used for documentation,
+  a type specification for expected argument values, and a flag for whether
+  each of null values, unknown values, and values of the dynamic pseudo-type
+  are accepted.
+
+* Zero or one _variadic parameter_, with the same structure as the _positional_
+  parameters, which if present collects any additional arguments provided at
+  the function call site.
+
+* A _result type definition_, which specifies the value type returned for each
+  valid sequence of argument values.
+
+* A _result value definition_, which specifies the value returned for each
+  valid sequence of argument values.
+
+A _function call_, regardless of source syntax, consists of a sequence of
+argument values. The argument values are each mapped to a corresponding
+parameter as follows:
+
+* For each of the function's positional parameters in sequence, take the next
+  argument. If there are no more arguments, the call is erroneous.
+
+* If the function has a variadic parameter, take all remaining arguments that
+  were not yet assigned to a positional parameter and collect them into
+  a sequence of variadic arguments that each correspond to the variadic
+  parameter.
+
+* If the function has _no_ variadic parameter, it is an error if any arguments
+  remain after taking one argument for each positional parameter.
+
+After mapping each argument to a parameter, semantic checking proceeds
+for each argument:
+
+* If the argument value corresponding to a parameter does not match the
+  parameter's type specification, the call is erroneous.
+
+* If the argument value corresponding to a parameter is null and the parameter
+  is not specified as accepting nulls, the call is erroneous.
+
+* If the argument value corresponding to a parameter is the dynamic value
+  and the parameter is not specified as accepting values of the dynamic
+  pseudo-type, the call is valid but its _result type_ is forced to be the
+  dynamic pseudo-type.
+
+* If neither of the above conditions holds for any argument, the call is
+  valid and the function's value type definition is used to determine the
+  call's _result type_. A function _may_ vary its result type depending on
+  the argument _values_ as well as the argument _types_; for example, a
+  function that decodes a JSON value will return a different result type
+  depending on the data structure described by the given JSON source code.
+
+If semantic checking succeeds without error, the call is _executed_:
+
+* For each argument, if its value is unknown and its corresponding parameter
+  is not specified as accepting unknowns, the _result value_ is forced to be an
+  unknown value of the result type.
+
+* If the previous condition does not apply, the function's result value
+  definition is used to determine the call's _result value_.
+
+The result of a function call expression is either an error, if one of the
+erroneous conditions above applies, or the _result value_.
+
+## Type Conversions and Unification
+
+Values given in configuration may not always match the expectations of the
+operations applied to them or to the calling application. In such situations,
+automatic type conversion is attempted as a convenience to the user.
+
+Along with conversions to a _specified_ type, it is sometimes necessary to
+ensure that a selection of values are all of the _same_ type, without any
+constraint on which type that is.
This is the process of _type unification_,
+which attempts to find the most general type that all of the given types can
+be converted to.
+
+Both type conversions and unification are defined in the syntax-agnostic
+model to ensure consistency of behavior between syntaxes.
+
+Type conversions are broadly characterized into two categories: _safe_ and
+_unsafe_. A conversion is "safe" if any distinct value of the source type
+has a corresponding distinct value in the target type. A conversion is
+"unsafe" if either the target type values are _not_ distinct (information
+may be lost in conversion) or if some values of the source type do not have
+any corresponding value in the target type. An unsafe conversion may result
+in an error.
+
+A given type can always be converted to itself, which is a no-op.
+
+### Conversion of Null Values
+
+All null values are safely convertible to a null value of any other type,
+regardless of other type-specific rules specified in the sections below.
+
+### Conversion to and from the Dynamic Pseudo-type
+
+Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds,
+producing an unknown value of the target type.
+
+Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
+is the input value, verbatim. This is the only situation where the conversion
+result value is not of the given target type.
+
+### Primitive Type Conversions
+
+Bidirectional conversions are available between the string and number types,
+and between the string and boolean types.
+
+The bool value true corresponds to the string containing the characters "true",
+while the bool value false corresponds to the string containing the characters
+"false". Conversion from bool to string is safe, while the converse is
+unsafe. The strings "1" and "0" are alternative string representations
+of true and false respectively. It is an error to convert a string other than
+the four in this paragraph to type bool.
+
+A number value is converted to string by translating its integer portion
+into a sequence of decimal digits (`0` through `9`), and then if it has a
+non-zero fractional part, a period `.` followed by a sequence of decimal
+digits representing its fractional part. No exponent portion is included.
+The number is converted at its full precision. Conversion from number to
+string is safe.
+
+A string is converted to a number value by reversing the above mapping.
+No exponent portion is allowed. Conversion from string to number is unsafe.
+It is an error to convert a string that does not comply with the expected
+syntax to type number.
+
+No direct conversion is available between the bool and number types.
+
+### Collection and Structural Type Conversions
+
+Conversion from set types to list types is _safe_, as long as their
+element types are safely convertible. If the element types are _unsafely_
+convertible, then the collection conversion is also unsafe. Each set element
+becomes a corresponding list element, in an undefined order. Although no
+particular ordering is required, implementations _should_ produce list
+elements in a consistent order for a given input set, as a convenience
+to calling applications.
+
+Conversion from list types to set types is _unsafe_, provided that their element
+types are convertible. Each distinct list item becomes a distinct set item.
+If two list items are equal, one of the two is lost in the conversion.
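+
+go-cty's `convert` package (already used by `ops.go` earlier in this change)
+implements these rules. A small, non-normative sketch of one safe and one
+unsafe collection conversion:
+
+```go
+package main
+
+import (
+  "fmt"
+
+  "github.com/zclconf/go-cty/cty"
+  "github.com/zclconf/go-cty/cty/convert"
+)
+
+func main() {
+  // Set of string to list of string: safe; order is undefined but
+  // consistent for a given input set.
+  s := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+  if l, err := convert.Convert(s, cty.List(cty.String)); err == nil {
+    fmt.Println(l.LengthInt()) // 2
+  }
+
+  // List of string to set of string: unsafe; equal elements collapse.
+  dup := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a")})
+  if set, err := convert.Convert(dup, cty.Set(cty.String)); err == nil {
+    fmt.Println(set.LengthInt()) // 1
+  }
+}
+```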
+
+Conversion from tuple types to list types is permitted if all of the
+tuple element types are convertible to the target list element type.
+The safety of the conversion depends on the safety of each of the element
+conversions. Each element in turn is converted to the list element type,
+producing a list of identical length.
+
+Conversion from tuple types to set types is permitted, behaving as if the
+tuple type was first converted to a list of the same element type and then
+that list converted to the target set type.
+
+Conversion from object types to map types is permitted if all of the object
+attribute types are convertible to the target map element type. The safety
+of the conversion depends on the safety of each of the attribute conversions.
+Each attribute in turn is converted to the map element type, and map element
+keys are set to the name of each corresponding object attribute.
+
+Conversion from list and set types to tuple types is permitted, following
+the opposite steps as the converse conversions. Such conversions are _unsafe_.
+It is an error to convert a list or set to a tuple type whose number of
+elements does not match the list or set length.
+
+Conversion from map types to object types is permitted if each map key
+corresponds to an attribute in the target object type. It is an error to
+convert from a map value whose set of keys does not exactly match the target
+type's attributes. The conversion takes the opposite steps of the converse
+conversion.
+
+Conversion from one object type to another is permitted as long as the
+common attribute names have convertible types. Any attribute present in the
+target type but not in the source type is populated with a null value of
+the appropriate type.
+
+Conversion from one tuple type to another is permitted as long as the
+tuples have the same length and the elements have convertible types.
+
+### Type Unification
+
+Type unification is an operation that takes a list of types and attempts
+to find a single type to which they can all be converted. Since some
+type pairs have bidirectional conversions, preference is given to _safe_
+conversions. In technical terms, all possible types are arranged into
+a lattice, from which a most general supertype is selected where possible.
+
+The type resulting from type unification may be one of the input types, or
+it may be an entirely new type produced by combination of two or more
+input types.
+
+The following rules do not guarantee a valid result. In addition to these
+rules, unification fails if any of the given types are not convertible
+(per the above rules) to the selected result type.
+
+The following unification rules apply transitively. That is, if a rule is
+defined from A to B, and one from B to C, then A can unify to C.
+
+Number and bool types both unify with string by preferring string.
+
+Two collection types of the same kind unify according to the unification
+of their element types.
+
+List and set types unify by preferring the list type.
+
+Map and object types unify by preferring the object type.
+
+List, set and tuple types unify by preferring the tuple type.
+
+The dynamic pseudo-type unifies with any other type by selecting that other
+type. The dynamic pseudo-type is the result type only if _all_ input types
+are the dynamic pseudo-type.
+
+Two object types unify by constructing a new type whose attributes are
+the union of those of the two input types. Any common attributes themselves
+have their types unified.
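+
+In go-cty these rules are implemented by `convert.Unify`. The sketch below
+is non-normative and assumes go-cty's return shape (the unified type plus a
+conversion per input, with `cty.NilType` signalling failure), which is that
+library's API rather than part of this specification; imports as in the
+previous sketch:
+
+```go
+// unifyExample finds a common type for a number and a string; per the
+// rules above, string is preferred.
+func unifyExample() cty.Type {
+  ty, _ := convert.Unify([]cty.Type{cty.Number, cty.String})
+  return ty // cty.String; cty.NilType would mean unification failed
+}
+```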
+
+Two tuple types of the same length unify by constructing a new type of the
+same length whose elements are the unification of the corresponding elements
+in the two input types.
+
+## Implementation Considerations
+
+Implementations of this specification are free to adopt any strategy that
+produces behavior consistent with the specification. This non-normative
+section describes some possible implementation strategies that are consistent
+with the goals of this specification.
+
+### Language-agnosticism
+
+The language-agnosticism of this specification assumes that certain behaviors
+are implemented separately for each syntax:
+
+* Matching of a body schema with the physical elements of a body in the
+  source language, to determine correspondence between physical constructs
+  and schema elements.
+
+* Implementing the _dynamic attributes_ body processing mode by either
+  interpreting all physical constructs as attributes or producing an error
+  if non-attribute constructs are present.
+
+* Providing an evaluation function for all possible expressions that produces
+  a value given an evaluation context.
+
+The suggested implementation strategy is to use an implementation language's
+closest concept to an _abstract type_, _virtual type_ or _interface type_
+to represent both Body and Expression. Each language-specific implementation
+can then provide an implementation of each of these types wrapping AST nodes
+or other physical constructs from the language parser. diff --git a/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go new file mode 100644 index 00000000000..98ada87b629 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go @@ -0,0 +1,40 @@ +package hcl
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+type staticExpr struct {
+ val cty.Value
+ rng Range
+}
+
+// StaticExpr returns an Expression that always evaluates to the given value.
+//
+// This is useful to substitute default values for expressions that are
+// not explicitly given in configuration and thus would otherwise have no
+// Expression to return.
+//
+// Since expressions are expected to have a source range, the caller must
+// provide one. Ideally this should be a real source range, but it can
+// be a synthetic one (with an empty-string filename) if no suitable range
+// is available.
+func StaticExpr(val cty.Value, rng Range) Expression {
+ return staticExpr{val, rng}
+}
+
+func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) {
+ return e.val, nil
+}
+
+func (e staticExpr) Variables() []Traversal {
+ return nil
+}
+
+func (e staticExpr) Range() Range {
+ return e.rng
+}
+
+func (e staticExpr) StartRange() Range {
+ return e.rng
+} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/structure.go new file mode 100644 index 00000000000..b336f300dd2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/structure.go @@ -0,0 +1,151 @@ +package hcl
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+// File is the top-level node that results from parsing an HCL file.
+type File struct {
+ Body Body
+ Bytes []byte
+
+ // Nav is used to integrate with the "hcled" editor integration package,
+ // and with diagnostic information formatters. It is not for direct use
+ // by a calling application.
+ Nav interface{}
+}
+
+// Block represents a nested block within a Body.
+type Block struct {
+ Type string
+ Labels []string
+ Body Body
+
+ DefRange Range // Range that can be considered the "definition" for seeking in an editor
+ TypeRange Range // Range for the block type declaration specifically.
+ LabelRanges []Range // Ranges for the label values specifically.
+}
+
+// Blocks is a sequence of Block.
+type Blocks []*Block
+
+// Attributes is a set of attributes keyed by their names.
+type Attributes map[string]*Attribute
+
+// Body is a container for attributes and blocks. It serves as the primary
+// unit of hierarchical structure within configuration.
+//
+// The content of a body cannot be meaningfully interpreted without a schema,
+// so Body represents the raw body content and has methods that allow the
+// content to be extracted in terms of a given schema.
+type Body interface {
+ // Content verifies that the entire body content conforms to the given
+ // schema and then returns it, and/or returns diagnostics. The returned
+ // body content is valid if non-nil, regardless of whether Diagnostics
+ // are provided, but diagnostics should still be eventually shown to
+ // the user.
+ Content(schema *BodySchema) (*BodyContent, Diagnostics)
+
+ // PartialContent is like Content except that it permits the configuration
+ // to contain additional blocks or attributes not specified in the
+ // schema. If any are present, the returned Body is non-nil and contains
+ // the remaining items from the body that were not selected by the schema.
+ PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics)
+
+ // JustAttributes attempts to interpret all of the contents of the body
+ // as attributes, allowing for the contents to be accessed without a priori
+ // knowledge of the structure.
+ //
+ // The behavior of this method depends on the body's source language.
+ // Some languages, like JSON, can't distinguish between attributes and
+ // blocks without schema hints, but for languages that _can_, error
+ // diagnostics will be generated if any blocks are present in the body.
+ //
+ // Diagnostics may be produced for other reasons too, such as duplicate
+ // declarations of the same attribute.
+ JustAttributes() (Attributes, Diagnostics)
+
+ // MissingItemRange returns a range that represents where a missing item
+ // might hypothetically be inserted. This is used when producing
+ // diagnostics about missing required attributes or blocks. Not all bodies
+ // will have an obvious single insertion point, so the result here may
+ // be rather arbitrary.
+ MissingItemRange() Range
+}
+
+// BodyContent is the result of applying a BodySchema to a Body.
+type BodyContent struct {
+ Attributes Attributes
+ Blocks Blocks
+
+ MissingItemRange Range
+}
+
+// Attribute represents an attribute from within a body.
+type Attribute struct {
+ Name string
+ Expr Expression
+
+ Range Range
+ NameRange Range
+}
+
+// Expression is a literal value or an expression provided in the
+// configuration, which can be evaluated within a scope to produce a value.
+type Expression interface {
+ // Value returns the value resulting from evaluating the expression
+ // in the given evaluation context.
+ //
+ // The context may be nil, in which case the expression may contain
+ // only constants and diagnostics will be produced for any non-constant
+ // sub-expressions. (The exact definition of this depends on the source
+ // language.)
+ // + // The context may instead be set but have either its Variables or + // Functions maps set to nil, in which case only use of these features + // will return diagnostics. + // + // Different diagnostics are provided depending on whether the given + // context maps are nil or empty. In the former case, the message + // tells the user that variables/functions are not permitted at all, + // while in the latter case usage will produce a "not found" error for + // the specific symbol in question. + Value(ctx *EvalContext) (cty.Value, Diagnostics) + + // Variables returns a list of variables referenced in the receiving + // expression. These are expressed as absolute Traversals, so may include + // additional information about how the variable is used, such as + // attribute lookups, which the calling application can potentially use + // to only selectively populate the scope. + Variables() []Traversal + + Range() Range + StartRange() Range +} + +// OfType filters the receiving block sequence by block type name, +// returning a new block sequence including only the blocks of the +// requested type. +func (els Blocks) OfType(typeName string) Blocks { + ret := make(Blocks, 0) + for _, el := range els { + if el.Type == typeName { + ret = append(ret, el) + } + } + return ret +} + +// ByType transforms the receiving block sequence into a map from type +// name to block sequences of only that type. +func (els Blocks) ByType() map[string]Blocks { + ret := make(map[string]Blocks) + for _, el := range els { + ty := el.Type + if ret[ty] == nil { + ret[ty] = make(Blocks, 0, 1) + } + ret[ty] = append(ret[ty], el) + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go new file mode 100644 index 00000000000..867ed425f5e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go @@ -0,0 +1,324 @@ +package hcl + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// A Traversal is a description of traversing through a value through a +// series of operations such as attribute lookup, index lookup, etc. +// +// It is used to look up values in scopes, for example. +// +// The traversal operations are implementations of interface Traverser. +// This is a closed set of implementations, so the interface cannot be +// implemented from outside this package. +// +// A traversal can be absolute (its first value is a symbol name) or relative +// (starts from an existing value). +type Traversal []Traverser + +// TraversalJoin appends a relative traversal to an absolute traversal to +// produce a new absolute traversal. +func TraversalJoin(abs Traversal, rel Traversal) Traversal { + if abs.IsRelative() { + panic("first argument to TraversalJoin must be absolute") + } + if !rel.IsRelative() { + panic("second argument to TraversalJoin must be relative") + } + + ret := make(Traversal, len(abs)+len(rel)) + copy(ret, abs) + copy(ret[len(abs):], rel) + return ret +} + +// TraverseRel applies the receiving traversal to the given value, returning +// the resulting value. This is supported only for relative traversals, +// and will panic if applied to an absolute traversal. +func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) { + if !t.IsRelative() { + panic("can't use TraverseRel on an absolute traversal") + } + + current := val + var diags Diagnostics + for _, tr := range t { + var newDiags Diagnostics + current, newDiags = tr.TraversalStep(current) + diags = append(diags, newDiags...) 
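+ // Give up after the first failed step, since any further steps would
+ // only be operating on a placeholder value.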
+		if newDiags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+	}
+	return current, diags
+}
+
+// TraverseAbs applies the receiving traversal to the given eval context,
+// returning the resulting value. This is supported only for absolute
+// traversals, and will panic if applied to a relative traversal.
+func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	if t.IsRelative() {
+		panic("can't use TraverseAbs on a relative traversal")
+	}
+
+	split := t.SimpleSplit()
+	root := split.Abs[0].(TraverseRoot)
+	name := root.Name
+
+	thisCtx := ctx
+	hasNonNil := false
+	for thisCtx != nil {
+		if thisCtx.Variables == nil {
+			thisCtx = thisCtx.parent
+			continue
+		}
+		hasNonNil = true
+		val, exists := thisCtx.Variables[name]
+		if exists {
+			return split.Rel.TraverseRel(val)
+		}
+		thisCtx = thisCtx.parent
+	}
+
+	if !hasNonNil {
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Variables not allowed",
+				Detail:   "Variables may not be used here.",
+				Subject:  &root.SrcRange,
+			},
+		}
+	}
+
+	suggestions := make([]string, 0, len(ctx.Variables))
+	thisCtx = ctx
+	for thisCtx != nil {
+		for k := range thisCtx.Variables {
+			suggestions = append(suggestions, k)
+		}
+		thisCtx = thisCtx.parent
+	}
+	suggestion := nameSuggestion(name, suggestions)
+	if suggestion != "" {
+		suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+	}
+
+	return cty.DynamicVal, Diagnostics{
+		{
+			Severity: DiagError,
+			Summary:  "Unknown variable",
+			Detail:   fmt.Sprintf("There is no variable named %q.%s", name, suggestion),
+			Subject:  &root.SrcRange,
+		},
+	}
+}
+
+// IsRelative returns true if the receiver is a relative traversal, or false
+// otherwise.
+func (t Traversal) IsRelative() bool {
+	if len(t) == 0 {
+		return true
+	}
+	if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot {
+		return false
+	}
+	return true
+}
+
+// SimpleSplit returns a TraversalSplit where the name lookup is the absolute
+// part and the remainder is the relative part. Supported only for
+// absolute traversals, and will panic if applied to a relative traversal.
+//
+// This can be used by applications that have a relatively simple variable
+// namespace where only the top-level is directly populated in the scope, with
+// everything else handled by relative lookups from those initial values.
+func (t Traversal) SimpleSplit() TraversalSplit {
+	if t.IsRelative() {
+		panic("can't use SimpleSplit on a relative traversal")
+	}
+	return TraversalSplit{
+		Abs: t[0:1],
+		Rel: t[1:],
+	}
+}
+
+// RootName returns the root name for an absolute traversal. Will panic if
+// called on a relative traversal.
+func (t Traversal) RootName() string {
+	if t.IsRelative() {
+		panic("can't use RootName on a relative traversal")
+	}
+	return t[0].(TraverseRoot).Name
+}
+
+// TraversalSplit represents a pair of traversals, the first of which is
+// an absolute traversal and the second of which is relative to the first.
+//
+// This is used by calling applications that only populate prefixes of the
+// traversals in the scope, with Abs representing the part coming from the
+// scope and Rel representing the remaining steps once that part is
+// retrieved.
+type TraversalSplit struct {
+	Abs Traversal
+	Rel Traversal
+}
+
+// TraverseAbs traverses from a scope to the value resulting from the
+// absolute traversal.
+func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return t.Abs.TraverseAbs(ctx)
+}
+
+// TraverseRel traverses from a given value, assumed to be the result of
+// TraverseAbs on some scope, to a final result for the entire split traversal.
+func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
+	return t.Rel.TraverseRel(val)
+}
+
+// Traverse is a convenience function to apply TraverseAbs followed by
+// TraverseRel.
+func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
+	v1, diags := t.TraverseAbs(ctx)
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+	v2, newDiags := t.TraverseRel(v1)
+	diags = append(diags, newDiags...)
+	return v2, diags
+}
+
+// Join concatenates together the Abs and Rel parts to produce a single
+// absolute traversal.
+func (t TraversalSplit) Join() Traversal {
+	return TraversalJoin(t.Abs, t.Rel)
+}
+
+// RootName returns the root name for the absolute part of the split.
+func (t TraversalSplit) RootName() string {
+	return t.Abs.RootName()
+}
+
+// A Traverser is a step within a Traversal.
+type Traverser interface {
+	TraversalStep(cty.Value) (cty.Value, Diagnostics)
+	isTraverserSigil() isTraverser
+}
+
+// Embed this in a struct to declare it as a Traverser.
+type isTraverser struct {
+}
+
+func (tr isTraverser) isTraverserSigil() isTraverser {
+	return isTraverser{}
+}
+
+// TraverseRoot looks up a root name in a scope. It is used as the first step
+// of an absolute Traversal, and cannot itself be traversed directly.
+type TraverseRoot struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+// TraversalStep on a TraverseRoot immediately panics, because absolute
+// traversals cannot be directly traversed.
+func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
+	panic("Cannot traverse an absolute traversal")
+}
+
+// TraverseAttr looks up an attribute in its initial value.
+type TraverseAttr struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	if val.IsNull() {
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Attempt to get attribute from null value",
+				Detail:   "This value is null, so it does not have any attributes.",
+				Subject:  &tn.SrcRange,
+			},
+		}
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		if !ty.HasAttribute(tn.Name) {
+			return cty.DynamicVal, Diagnostics{
+				{
+					Severity: DiagError,
+					Summary:  "Unsupported attribute",
+					Detail:   fmt.Sprintf("This object does not have an attribute named %q.", tn.Name),
+					Subject:  &tn.SrcRange,
+				},
+			}
+		}
+
+		if !val.IsKnown() {
+			return cty.UnknownVal(ty.AttributeType(tn.Name)), nil
+		}
+
+		return val.GetAttr(tn.Name), nil
+	case ty.IsMapType():
+		if !val.IsKnown() {
+			return cty.UnknownVal(ty.ElementType()), nil
+		}
+
+		idx := cty.StringVal(tn.Name)
+		if val.HasIndex(idx).False() {
+			return cty.DynamicVal, Diagnostics{
+				{
+					Severity: DiagError,
+					Summary:  "Missing map element",
+					Detail:   fmt.Sprintf("This map does not have an element with the key %q.", tn.Name),
+					Subject:  &tn.SrcRange,
+				},
+			}
+		}
+
+		return val.Index(idx), nil
+	case ty == cty.DynamicPseudoType:
+		return cty.DynamicVal, nil
+	default:
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Unsupported attribute",
+				Detail:   "This value does not have any attributes.",
+				Subject:  &tn.SrcRange,
+			},
+		}
+	}
+}
+
+// TraverseIndex applies the index operation to its initial value.
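+// For example, the traversal corresponding to an expression like foo["bar"]
+// could be sketched as follows (the names "foo" and "bar" are illustrative
+// assumptions, not part of this package):
+//
+//	t := Traversal{
+//		TraverseRoot{Name: "foo"},
+//		TraverseIndex{Key: cty.StringVal("bar")},
+//	}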
+type TraverseIndex struct { + isTraverser + Key cty.Value + SrcRange Range +} + +func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { + return Index(val, tn.Key, &tn.SrcRange) +} + +// TraverseSplat applies the splat operation to its initial value. +type TraverseSplat struct { + isTraverser + Each Traversal + SrcRange Range +} + +func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { + panic("TraverseSplat not yet implemented") +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go new file mode 100644 index 00000000000..7e652e9bc64 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go @@ -0,0 +1,21 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type blockLabel struct { + Value string + Range hcl.Range +} + +func labelsForBlock(block *hcl.Block) []blockLabel { + ret := make([]blockLabel, len(block.Labels)) + for i := range block.Labels { + ret[i] = blockLabel{ + Value: block.Labels[i], + Range: block.LabelRanges[i], + } + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go new file mode 100644 index 00000000000..6cf93fedd3e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { + schema := ImpliedSchema(spec) + + var content *hcl.BodyContent + var diags hcl.Diagnostics + var leftovers hcl.Body + + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + + val, valDiags := spec.decode(content, blockLabels, ctx) + diags = append(diags, valDiags...) + + return val, leftovers, diags +} + +func impliedType(spec Spec) cty.Type { + return spec.impliedType() +} + +func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + return spec.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go new file mode 100644 index 00000000000..23bfe542b2f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go @@ -0,0 +1,12 @@ +// Package hcldec provides a higher-level API for unpacking the content of +// HCL bodies, implemented in terms of the low-level "Content" API exposed +// by the bodies themselves. +// +// It allows decoding an entire nested configuration in a single operation +// by providing a description of the intended structure. +// +// For some applications it may be more convenient to use the "gohcl" +// package, which has a similar purpose but decodes directly into native +// Go data types. hcldec instead targets the cty type system, and thus allows +// a cty-driven application to remain within that type system. 
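+//
+// As a rough sketch of the intended usage (the "io_mode" attribute name and
+// the inline HCL snippet are illustrative assumptions, not part of this
+// package):
+//
+//	parser := hclparse.NewParser()
+//	f, diags := parser.ParseHCL([]byte(`io_mode = "async"`), "example.hcl")
+//	spec := &hcldec.AttrSpec{Name: "io_mode", Type: cty.String, Required: true}
+//	val, moreDiags := hcldec.Decode(f.Body, spec, nil)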
+package hcldec diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go new file mode 100644 index 00000000000..e2027cfd2d2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go @@ -0,0 +1,23 @@ +package hcldec + +import ( + "encoding/gob" +) + +func init() { + // Every Spec implementation should be registered with gob, so that + // specs can be sent over gob channels, such as using + // github.com/hashicorp/go-plugin with plugins that need to describe + // what shape of configuration they are expecting. + gob.Register(ObjectSpec(nil)) + gob.Register(TupleSpec(nil)) + gob.Register((*AttrSpec)(nil)) + gob.Register((*LiteralSpec)(nil)) + gob.Register((*ExprSpec)(nil)) + gob.Register((*BlockSpec)(nil)) + gob.Register((*BlockListSpec)(nil)) + gob.Register((*BlockSetSpec)(nil)) + gob.Register((*BlockMapSpec)(nil)) + gob.Register((*BlockLabelSpec)(nil)) + gob.Register((*DefaultSpec)(nil)) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go new file mode 100644 index 00000000000..3e58f7b3c2c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go @@ -0,0 +1,53 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// Decode interprets the given body using the given specification and returns +// the resulting value. If the given body is not valid per the spec, error +// diagnostics are returned and the returned value is likely to be incomplete. +// +// The ctx argument may be nil, in which case any references to variables or +// functions will produce error diagnostics. +func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, _, diags := decode(body, nil, ctx, spec, false) + return val, diags +} + +// PartialDecode is like Decode except that it permits "leftover" items in +// the top-level body, which are returned as a new body to allow for +// further processing. +// +// Any descendent block bodies are _not_ decoded partially and thus must +// be fully described by the given specification. +func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { + return decode(body, nil, ctx, spec, true) +} + +// ImpliedType returns the value type that should result from decoding the +// given spec. +func ImpliedType(spec Spec) cty.Type { + return impliedType(spec) +} + +// SourceRange interprets the given body using the given specification and +// then returns the source range of the value that would be used to +// fulfill the spec. +// +// This can be used if application-level validation detects value errors, to +// obtain a reasonable SourceRange to use for generated diagnostics. It works +// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) +// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will +// be less useful the broader the specification, so e.g. a spec that returns +// the entirety of all of the blocks of a given type is likely to be +// _particularly_ arbitrary and useless. +// +// If the given body is not valid per the given spec, the result is best-effort +// and may not actually be something ideal. It's expected that an application +// will already have used Decode or PartialDecode earlier and thus had an +// opportunity to detect and report spec violations. 
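+//
+// A sketch of the intended pattern (the "port" attribute name and the
+// surrounding validation step are illustrative assumptions):
+//
+//	spec := &AttrSpec{Name: "port", Type: cty.Number}
+//	val, diags := Decode(body, spec, nil)
+//	// ...application-level validation decides val is invalid...
+//	rng := SourceRange(body, spec)
+//	// rng can now be attached to the application's own diagnostic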
+func SourceRange(body hcl.Body, spec Spec) hcl.Range { + return sourceRange(body, nil, spec) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go new file mode 100644 index 00000000000..b57bd969209 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. +// This is the schema that the Decode function will use internally to +// access the content of a given body. +func ImpliedSchema(spec Spec) *hcl.BodySchema { + var attrs []hcl.AttributeSchema + var blocks []hcl.BlockHeaderSchema + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if as, ok := s.(attrSpec); ok { + attrs = append(attrs, as.attrSchemata()...) + } + + if bs, ok := s.(blockSpec); ok { + blocks = append(blocks, bs.blockHeaderSchemata()...) + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return &hcl.BodySchema{ + Attributes: attrs, + Blocks: blocks, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go new file mode 100644 index 00000000000..f0e6842be0d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go @@ -0,0 +1,859 @@ +package hcldec + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// A Spec is a description of how to decode a hcl.Body to a cty.Value. +// +// The various other types in this package whose names end in "Spec" are +// the spec implementations. The most common top-level spec is ObjectSpec, +// which decodes body content into a cty.Value of an object type. +type Spec interface { + // Perform the decode operation on the given body, in the context of + // the given block (which might be null), using the given eval context. + // + // "block" is provided only by the nested calls performed by the spec + // types that work on block bodies. + decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + + // Return the cty.Type that should be returned when decoding a body with + // this spec. + impliedType() cty.Type + + // Call the given callback once for each of the nested specs that would + // get decoded with the same body and block as the receiver. This should + // not descend into the nested specs used when decoding blocks. + visitSameBodyChildren(cb visitFunc) + + // Determine the source range of the value that would be returned for the + // spec in the given content, in the context of the given block + // (which might be null). If the corresponding item is missing, return + // a place where it might be inserted. + sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range +} + +type visitFunc func(spec Spec) + +// An ObjectSpec is a Spec that produces a cty.Value of an object type whose +// attributes correspond to the keys of the spec map. +type ObjectSpec map[string]Spec + +// attrSpec is implemented by specs that require attributes from the body. +type attrSpec interface { + attrSchemata() []hcl.AttributeSchema +} + +// blockSpec is implemented by specs that require blocks from the body. 
+type blockSpec interface { + blockHeaderSchemata() []hcl.BlockHeaderSchema +} + +// specNeedingVariables is implemented by specs that can use variables +// from the EvalContext, to declare which variables they need. +type specNeedingVariables interface { + variablesNeeded(content *hcl.BodyContent) []hcl.Traversal +} + +func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make(map[string]cty.Value, len(s)) + var diags hcl.Diagnostics + + for k, spec := range s { + var kd hcl.Diagnostics + vals[k], kd = spec.decode(content, blockLabels, ctx) + diags = append(diags, kd...) + } + + return cty.ObjectVal(vals), diags +} + +func (s ObjectSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyObject + } + + attrTypes := make(map[string]cty.Type) + for k, childSpec := range s { + attrTypes[k] = childSpec.impliedType() + } + return cty.Object(attrTypes) +} + +func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose +// elements correspond to the elements of the spec slice. +type TupleSpec []Spec + +func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make([]cty.Value, len(s)) + var diags hcl.Diagnostics + + for i, spec := range s { + var ed hcl.Diagnostics + vals[i], ed = spec.decode(content, blockLabels, ctx) + diags = append(diags, ed...) + } + + return cty.TupleVal(vals), diags +} + +func (s TupleSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyTuple + } + + attrTypes := make([]cty.Type, len(s)) + for i, childSpec := range s { + attrTypes[i] = childSpec.impliedType() + } + return cty.Tuple(attrTypes) +} + +func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// An AttrSpec is a Spec that evaluates a particular attribute expression in +// the body and returns its resulting value converted to the requested type, +// or produces a diagnostic if the type is incorrect. 
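+//
+// For example, a required string attribute could be described with a spec
+// like the following sketch (the attribute name "region" is an illustrative
+// assumption):
+//
+//	spec := &AttrSpec{
+//		Name:     "region",
+//		Type:     cty.String,
+//		Required: true,
+//	}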
+type AttrSpec struct { + Name string + Type cty.Type + Required bool +} + +func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + attr, exists := content.Attributes[s.Name] + if !exists { + return nil + } + + return attr.Expr.Variables() +} + +// attrSpec implementation +func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { + return []hcl.AttributeSchema{ + { + Name: s.Name, + Required: s.Required, + }, + } +} + +func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + attr, exists := content.Attributes[s.Name] + if !exists { + return content.MissingItemRange + } + + return attr.Expr.Range() +} + +func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + attr, exists := content.Attributes[s.Name] + if !exists { + // We don't need to check required and emit a diagnostic here, because + // that would already have happened when building "content". + return cty.NullVal(s.Type), nil + } + + val, diags := attr.Expr.Value(ctx) + + convVal, err := convert.Convert(val, s.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect attribute value type", + Detail: fmt.Sprintf( + "Inappropriate value for attribute %q: %s.", + s.Name, err.Error(), + ), + Subject: attr.Expr.StartRange().Ptr(), + Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), + }) + // We'll return an unknown value of the _correct_ type so that the + // incomplete result can still be used for some analysis use-cases. + val = cty.UnknownVal(s.Type) + } else { + val = convVal + } + + return val, diags +} + +func (s *AttrSpec) impliedType() cty.Type { + return s.Type +} + +// A LiteralSpec is a Spec that produces the given literal value, ignoring +// the given body. +type LiteralSpec struct { + Value cty.Value +} + +func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Value, nil +} + +func (s *LiteralSpec) impliedType() cty.Type { + return s.Value.Type() +} + +func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No sensible range to return for a literal, so the caller had better + // ensure it doesn't cause any diagnostics. + return hcl.Range{ + Filename: "", + } +} + +// An ExprSpec is a Spec that evaluates the given expression, ignoring the +// given body. 
+type ExprSpec struct { + Expr hcl.Expression +} + +func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + return s.Expr.Variables() +} + +func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Expr.Value(ctx) +} + +func (s *ExprSpec) impliedType() cty.Type { + // We can't know the type of our expression until we evaluate it + return cty.DynamicPseudoType +} + +func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Expr.Range() +} + +// A BlockSpec is a Spec that produces a cty.Value by decoding the contents +// of a single nested block of a given type, using a nested spec. +// +// If the Required flag is not set, the nested block may be omitted, in which +// case a null value is produced. If it _is_ set, an error diagnostic is +// produced if there are no nested blocks of the given type. +type BlockSpec struct { + TypeName string + Nested Spec + Required bool +} + +func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// specNeedingVariables implementation +func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return nil + } + + return Variables(childBlock.Body, s.Nested) +} + +func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + if childBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, childBlock.DefRange.String(), + ), + Subject: &candidate.DefRange, + }) + break + } + + childBlock = candidate + } + + if childBlock == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(s.Nested.impliedType()), diags + } + + if s.Nested == nil { + panic("BlockSpec with no Nested Spec") + } + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+	return val, diags
+}
+
+func (s *BlockSpec) impliedType() cty.Type {
+	return s.Nested.impliedType()
+}
+
+func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockListSpec is a Spec that produces a cty list of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+type BlockListSpec struct {
+	TypeName string
+	Nested   Spec
+	MinItems int
+	MaxItems int
+}
+
+func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: findLabelSpecs(s.Nested),
+		},
+	}
+}
+
+// specNeedingVariables implementation
+func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockListSpec with no Nested Spec")
+	}
+
+	var elems []cty.Value
+	var sourceRanges []hcl.Range
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
+		diags = append(diags, childDiags...)
+		elems = append(elems, val)
+		sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
+	}
+
+	if len(elems) < s.MinItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Insufficient %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
+			Subject:  &content.MissingItemRange,
+		})
+	} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Too many %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
+			Subject:  &sourceRanges[s.MaxItems],
+		})
+	}
+
+	var ret cty.Value
+
+	if len(elems) == 0 {
+		ret = cty.ListValEmpty(s.Nested.impliedType())
+	} else {
+		ret = cty.ListVal(elems)
+	}
+
+	return ret, diags
+}
+
+func (s *BlockListSpec) impliedType() cty.Type {
+	return cty.List(s.Nested.impliedType())
+}
+
+func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockSetSpec is a Spec that produces a cty set of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+type BlockSetSpec struct {
+	TypeName string
+	Nested   Spec
+	MinItems int
+	MaxItems int
+}
+
+func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: findLabelSpecs(s.Nested),
+		},
+	}
+}
+
+// specNeedingVariables implementation
+func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockSetSpec with no Nested Spec")
+	}
+
+	var elems []cty.Value
+	var sourceRanges []hcl.Range
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
+		diags = append(diags, childDiags...)
+		elems = append(elems, val)
+		sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
+	}
+
+	if len(elems) < s.MinItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Insufficient %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
+			Subject:  &content.MissingItemRange,
+		})
+	} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Too many %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
+			Subject:  &sourceRanges[s.MaxItems],
+		})
+	}
+
+	var ret cty.Value
+
+	if len(elems) == 0 {
+		ret = cty.SetValEmpty(s.Nested.impliedType())
+	} else {
+		ret = cty.SetVal(elems)
+	}
+
+	return ret, diags
+}
+
+func (s *BlockSetSpec) impliedType() cty.Type {
+	return cty.Set(s.Nested.impliedType())
+}
+
+func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockMapSpec is a Spec that produces a cty map of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+//
+// One level of map structure is created for each of the given label names.
+// There must be at least one given label name.
+type BlockMapSpec struct {
+	TypeName   string
+	LabelNames []string
+	Nested     Spec
+}
+
+func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
+		},
+	}
+}
+
+// specNeedingVariables implementation
+func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockMapSpec with no Nested Spec")
+	}
+
+	elems := map[string]interface{}{}
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		childLabels := labelsForBlock(childBlock)
+		val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
+		targetMap := elems
+		for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
+			if _, exists := targetMap[key]; !exists {
+				targetMap[key] = make(map[string]interface{})
+			}
+			targetMap = targetMap[key].(map[string]interface{})
+		}
+
+		diags = append(diags, childDiags...)
+
+		key := childBlock.Labels[len(s.LabelNames)-1]
+		if _, exists := targetMap[key]; exists {
+			labelsBuf := bytes.Buffer{}
+			for _, label := range childBlock.Labels {
+				fmt.Fprintf(&labelsBuf, " %q", label)
+			}
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", s.TypeName),
+				Detail: fmt.Sprintf(
+					"A block for %s%s was already defined. The %s labels must be unique.",
+					s.TypeName, labelsBuf.String(), s.TypeName,
+				),
+				Subject: &childBlock.DefRange,
+			})
+			continue
+		}
+
+		targetMap[key] = val
+	}
+
+	if len(elems) == 0 {
+		return cty.MapValEmpty(s.Nested.impliedType()), diags
+	}
+
+	var ctyMap func(map[string]interface{}, int) cty.Value
+	ctyMap = func(raw map[string]interface{}, depth int) cty.Value {
+		vals := make(map[string]cty.Value, len(raw))
+		if depth == 1 {
+			for k, v := range raw {
+				vals[k] = v.(cty.Value)
+			}
+		} else {
+			for k, v := range raw {
+				vals[k] = ctyMap(v.(map[string]interface{}), depth-1)
+			}
+		}
+		return cty.MapVal(vals)
+	}
+
+	return ctyMap(elems, len(s.LabelNames)), diags
+}
+
+func (s *BlockMapSpec) impliedType() cty.Type {
+	ret := s.Nested.impliedType()
+	for range s.LabelNames {
+		ret = cty.Map(ret)
+	}
+	return ret
+}
+
+func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockLabelSpec is a Spec that returns a cty.String representing the
+// label of the block its given body belongs to, if indeed its given body
+// belongs to a block. It is a programming error to use this in a non-block
+// context, so this spec will panic in that case.
+//
+// This spec only works in the nested spec within a BlockSpec, BlockListSpec,
+// BlockSetSpec or BlockMapSpec.
+//
+// The full set of label specs used against a particular block must have a
+// consecutive set of indices starting at zero. The maximum index found
+// defines how many labels the corresponding blocks must have in the source.
+type BlockLabelSpec struct {
+	Index int
+	Name  string
+}
+
+func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node
+}
+
+func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return cty.StringVal(blockLabels[s.Index].Value), nil
+}
+
+func (s *BlockLabelSpec) impliedType() cty.Type {
+	return cty.String // labels are always strings
+}
+
+func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return blockLabels[s.Index].Range
+}
+
+func findLabelSpecs(spec Spec) []string {
+	maxIdx := -1
+	var names map[int]string
+
+	var visit visitFunc
+	visit = func(s Spec) {
+		if ls, ok := s.(*BlockLabelSpec); ok {
+			if maxIdx < ls.Index {
+				maxIdx = ls.Index
+			}
+			if names == nil {
+				names = make(map[int]string)
+			}
+			names[ls.Index] = ls.Name
+		}
+		s.visitSameBodyChildren(visit)
+	}
+
+	visit(spec)
+
+	if maxIdx < 0 {
+		return nil // no labels at all
+	}
+
+	ret := make([]string, maxIdx+1)
+	for i := range ret {
+		name := names[i]
+		if name == "" {
+			// Should never happen if the spec is conformant, since we require
+			// consecutive indices starting at zero.
+			name = fmt.Sprintf("missing%02d", i)
+		}
+		ret[i] = name
+	}
+
+	return ret
+}
+
+// DefaultSpec is a spec that wraps two specs, evaluating the primary first
+// and then evaluating the default if the primary returns a null value.
+//
+// The two specifications must have the same implied result type for correct
+// operation. If not, the result is undefined.
+type DefaultSpec struct {
+	Primary Spec
+	Default Spec
+}
+
+func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Primary)
+	cb(s.Default)
+}
+
+func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	val, diags := s.Primary.decode(content, blockLabels, ctx)
+	if val.IsNull() {
+		var moreDiags hcl.Diagnostics
+		val, moreDiags = s.Default.decode(content, blockLabels, ctx)
+		diags = append(diags, moreDiags...)
+ } + return val, diags +} + +func (s *DefaultSpec) impliedType() cty.Type { + return s.Primary.impliedType() +} + +func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We can't tell from here which of the two specs will ultimately be used + // in our result, so we'll just assume the first. This is usually the right + // choice because the default is often a literal spec that doesn't have a + // reasonable source range to return anyway. + return s.Primary.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go new file mode 100644 index 00000000000..427b0d0e620 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go @@ -0,0 +1,34 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Variables processes the given body with the given spec and returns a +// list of the variable traversals that would be required to decode +// the same pairing of body and spec. +// +// This can be used to conditionally populate the variables in the EvalContext +// passed to Decode, for applications where a static scope is insufficient. +// +// If the given body is not compliant with the given schema, the result may +// be incomplete, but that's assumed to be okay because the eventual call +// to Decode will produce error diagnostics anyway. +func Variables(body hcl.Body, spec Spec) []hcl.Traversal { + schema := ImpliedSchema(spec) + + content, _, _ := body.PartialContent(schema) + + var vars []hcl.Traversal + + if vs, ok := spec.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + spec.visitSameBodyChildren(func(s Spec) { + if vs, ok := s.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + }) + + return vars +} diff --git a/vendor/github.com/hashicorp/hcl2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go new file mode 100644 index 00000000000..6d47f1268f4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go @@ -0,0 +1,123 @@ +package hclparse + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/hcl2/hcl/json" +) + +// NOTE: This is the public interface for parsing. The actual parsers are +// in other packages alongside this one, with this package just wrapping them +// to provide a unified interface for the caller across all supported formats. + +// Parser is the main interface for parsing configuration files. As well as +// parsing files, a parser also retains a registry of all of the files it +// has parsed so that multiple attempts to parse the same file will return +// the same object and so the collected files can be used when printing +// diagnostics. +// +// Any diagnostics for parsing a file are only returned once on the first +// call to parse that file. Callers are expected to collect up diagnostics +// and present them together, so returning diagnostics for the same file +// multiple times would create a confusing result. +type Parser struct { + files map[string]*hcl.File +} + +// NewParser creates a new parser, ready to parse configuration files. 
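+//
+// A minimal usage sketch (the filename "main.hcl" is an illustrative
+// assumption):
+//
+//	p := NewParser()
+//	f, diags := p.ParseHCLFile("main.hcl")
+//	if diags.HasErrors() {
+//		// render diags, using p.Files() for contextual information
+//	}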
+func NewParser() *Parser { + return &Parser{ + files: map[string]*hcl.File{}, + } +} + +// ParseHCL parses the given buffer (which is assumed to have been loaded from +// the given filename) as a native-syntax configuration file and returns the +// hcl.File object representing it. +func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) + p.files[filename] = file + return file, diags +} + +// ParseHCLFile reads the given filename and parses it as a native-syntax HCL +// configuration file. An error diagnostic is returned if the given file +// cannot be read. +func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), + }, + } + } + + return p.ParseHCL(src, filename) +} + +// ParseJSON parses the given JSON buffer (which is assumed to have been loaded +// from the given filename) and returns the hcl.File object representing it. +func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.Parse(src, filename) + p.files[filename] = file + return file, diags +} + +// ParseJSONFile reads the given filename and parses it as JSON, similarly to +// ParseJSON. An error diagnostic is returned if the given file cannot be read. +func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.ParseFile(filename) + p.files[filename] = file + return file, diags +} + +// AddFile allows a caller to record in a parser a file that was parsed some +// other way, thus allowing it to be included in the registry of sources. +func (p *Parser) AddFile(filename string, file *hcl.File) { + p.files[filename] = file +} + +// Sources returns a map from filenames to the raw source code that was +// read from them. This is intended to be used, for example, to print +// diagnostics with contextual information. +// +// The arrays underlying the returned slices should not be modified. +func (p *Parser) Sources() map[string][]byte { + ret := make(map[string][]byte) + for fn, f := range p.files { + ret[fn] = f.Bytes + } + return ret +} + +// Files returns a map from filenames to the File objects produced from them. +// This is intended to be used, for example, to print diagnostics with +// contextual information. +// +// The returned map and all of the objects it refers to directly or indirectly +// must not be modified. +func (p *Parser) Files() map[string]*hcl.File { + return p.files +} diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE new file mode 100644 index 00000000000..c33dcc7c928 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 00000000000..403ec780142 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,232 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. + adjustmentIndex uint + + // adjustment is used to store samples for the adjustment calculation. + adjustmentSamples []float64 + + // latencyFilterSamples is used to store the last several RTT samples, + // keyed by node name. We will use the config's LatencyFilterSamples + // value to determine how many samples we keep, per node. + latencyFilterSamples map[string][]float64 + + // stats is used to record events that occur when updating coordinates. + stats ClientStats + + // mutex enables safe concurrent access to the client. + mutex sync.RWMutex +} + +// ClientStats is used to record events that occur when updating coordinates. +type ClientStats struct { + // Resets is incremented any time we reset our local coordinate because + // our calculations have resulted in an invalid state. + Resets int +} + +// NewClient creates a new Client and verifies the configuration is valid. 
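+//
+// A minimal usage sketch (illustrative, not part of the upstream source;
+// otherCoord stands in for a *Coordinate received from a peer):
+//
+//	client, err := NewClient(DefaultConfig())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Fold in an observed RTT to another node...
+//	updated, err := client.Update("node_a", otherCoord, 25*time.Millisecond)
+//	// ...and read back the resulting estimate.
+//	estimate := client.GetCoordinate()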
+
+func NewClient(config *Config) (*Client, error) {
+	if !(config.Dimensionality > 0) {
+		return nil, fmt.Errorf("dimensionality must be >0")
+	}
+
+	return &Client{
+		coord:                NewCoordinate(config),
+		origin:               NewCoordinate(config),
+		config:               config,
+		adjustmentIndex:      0,
+		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
+		latencyFilterSamples: make(map[string][]float64),
+	}, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if err := c.checkCoordinate(coord); err != nil {
+		return err
+	}
+
+	c.coord = coord.Clone()
+	return nil
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	delete(c.latencyFilterSamples, node)
+}
+
+// Stats returns a copy of stats for the client.
+func (c *Client) Stats() ClientStats {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	return c.stats
+}
+
+// checkCoordinate returns an error if the coordinate isn't compatible with
+// this client, or if the coordinate itself isn't valid. This assumes the mutex
+// has been locked already.
+func (c *Client) checkCoordinate(coord *Coordinate) error {
+	if !c.coord.IsCompatibleWith(coord) {
+		return fmt.Errorf("dimensions aren't compatible")
+	}
+
+	if !coord.IsValid() {
+		return fmt.Errorf("coordinate is invalid")
+	}
+
+	return nil
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+	const zeroThreshold = 1.0e-6
+
+	dist := c.coord.DistanceTo(other).Seconds()
+	if rttSeconds < zeroThreshold {
+		rttSeconds = zeroThreshold
+	}
+	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+	totalError := c.coord.Error + other.Error
+	if totalError < zeroThreshold {
+		totalError = zeroThreshold
+	}
+	weight := c.coord.Error / totalError
+
+	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+	if c.coord.Error > c.config.VivaldiErrorMax {
+		c.coord.Error = c.config.VivaldiErrorMax
+	}
+
+	delta := c.config.VivaldiCC * weight
+	force := delta * (rttSeconds - dist)
+	c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
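+//
+// As a sketch of the math (commentary added here, not upstream): each slot in
+// the sample window stores the signed error of the raw estimate,
+//
+//	sample = rttSeconds - rawDistanceTo(other)
+//
+// and the stored Adjustment is half the windowed mean, so that when DistanceTo
+// later adds the Adjustment of both endpoints, roughly one full mean offset is
+// applied, per [3].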
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+	if c.config.AdjustmentWindowSize == 0 {
+		return
+	}
+
+	// Note that the existing adjustment factors don't figure into this
+	// calculation so we use the raw distance here.
+	dist := c.coord.rawDistanceTo(other)
+	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+	sum := 0.0
+	for _, sample := range c.adjustmentSamples {
+		sum += sample
+	}
+	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+	dist := c.origin.DistanceTo(c.coord).Seconds()
+	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if err := c.checkCoordinate(other); err != nil {
+		return nil, err
+	}
+
+	const maxRTT = 10 * time.Second
+	if rtt <= 0 || rtt > maxRTT {
+		return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v", rtt, maxRTT)
+	}
+
+	rttSeconds := c.latencyFilter(node, rtt.Seconds())
+	c.updateVivaldi(other, rttSeconds)
+	c.updateAdjustment(other, rttSeconds)
+	c.updateGravity()
+	if !c.coord.IsValid() {
+		c.stats.Resets++
+		c.coord = NewCoordinate(c.config)
+	}
+
+	return c.coord.Clone(), nil
+}
+
+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.DistanceTo(other)
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 00000000000..b85a8ab7b00
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+//     in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
+//     on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improve the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+	Dimensionality uint
+
+	// VivaldiErrorMax is the default error value when a node hasn't yet made
+	// any observations.
It also serves as an upper limit on the error value in
+	// case observations cause the error value to increase without bound.
+	VivaldiErrorMax float64
+
+	// VivaldiCE is a tuning factor that controls the maximum impact an
+	// observation can have on a node's confidence. See [1] for more details.
+	VivaldiCE float64
+
+	// VivaldiCC is a tuning factor that controls the maximum impact an
+	// observation can have on a node's coordinate. See [1] for more details.
+	VivaldiCC float64
+
+	// AdjustmentWindowSize is a tuning factor that determines how many samples
+	// we retain to calculate the adjustment factor as discussed in [3]. Setting
+	// this to zero disables this feature.
+	AdjustmentWindowSize uint
+
+	// HeightMin is the minimum value of the height parameter. Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since our time to probe any given node is
+	// pretty infrequent. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that controls how strongly gravity pulls
+	// coordinates back toward the origin to re-center them. See [2] for more
+	// details.
+	GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 00000000000..fbe792c90d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,203 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
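+	// (All values are in seconds, so 1.0e-6 is one microsecond; unitVectorAt
+	// falls back to a random direction for separations smaller than this, and
+	// ApplyForce skips its height update.)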
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is panicked if you try to perform operations
+// with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+	vec := make([]float64, len(c.Vec))
+	copy(vec, c.Vec)
+	return &Coordinate{
+		Vec:        vec,
+		Error:      c.Error,
+		Adjustment: c.Adjustment,
+		Height:     c.Height,
+	}
+}
+
+// componentIsValid returns false if a floating point value is a NaN or an
+// infinity.
+func componentIsValid(f float64) bool {
+	return !math.IsInf(f, 0) && !math.IsNaN(f)
+}
+
+// IsValid returns false if any component of a coordinate isn't valid, per the
+// componentIsValid() helper above.
+func (c *Coordinate) IsValid() bool {
+	for i := range c.Vec {
+		if !componentIsValid(c.Vec[i]) {
+			return false
+		}
+	}
+
+	return componentIsValid(c.Error) &&
+		componentIsValid(c.Adjustment) &&
+		componentIsValid(c.Height)
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+	return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
+	if !c.IsCompatibleWith(other) {
+		panic(DimensionalityConflictError{})
+	}
+
+	ret := c.Clone()
+	unit, mag := unitVectorAt(c.Vec, other.Vec)
+	ret.Vec = add(ret.Vec, mul(unit, force))
+	if mag > zeroThreshold {
+		ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
+		ret.Height = math.Max(ret.Height, config.HeightMin)
+	}
+	return ret
+}
+
+// DistanceTo returns the distance between this coordinate and the other
+// coordinate, including adjustments.
+func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
+	if !c.IsCompatibleWith(other) {
+		panic(DimensionalityConflictError{})
+	}
+
+	dist := c.rawDistanceTo(other)
+	adjustedDist := dist + c.Adjustment + other.Adjustment
+	if adjustedDist > 0.0 {
+		dist = adjustedDist
+	}
+	return time.Duration(dist * secondsToNanoseconds)
+}
+
+// rawDistanceTo returns the Vivaldi distance between this coordinate and the
+// other coordinate in seconds, not including adjustments. This assumes the
+// dimensions have already been checked to be compatible.
+func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
+	return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
+}
+
+// add returns the sum of vec1 and vec2. This assumes the dimensions have
+// already been checked to be compatible.
+func add(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] + vec2[i]
+	}
+	return ret
+}
+
+// diff returns the difference between vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
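+//
+// Together with add, mul, and magnitude below, this is all the vector
+// arithmetic the force updates need. For example (illustrative only):
+//
+//	v := diff([]float64{3, 4}, []float64{0, 0}) // {3, 4}
+//	m := magnitude(v)                           // 5
+//	u := mul(v, 1.0/m)                          // {0.6, 0.8}, a unit vector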
+func diff(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] - vec2[i]
+	}
+	return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+	ret := make([]float64, len(vec))
+	for i := range vec {
+		ret[i] = vec[i] * factor
+	}
+	return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+	sum := 0.0
+	for i := range vec {
+		sum += vec[i] * vec[i]
+	}
+	return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+	ret := diff(vec1, vec2)
+
+	// If the coordinates aren't on top of each other we can normalize.
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), mag
+	}
+
+	// Otherwise, just return a random unit vector.
+	for i := range ret {
+		ret[i] = rand.Float64() - 0.5
+	}
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), 0.0
+	}
+
+	// And finally just give up and make a unit vector along the first
+	// dimension. This should be exceedingly rare.
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 00000000000..6fb033c0cd3
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i, _ := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another.
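+// This models two LAN clusters joined by a single WAN link.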
The lan factor +// is used to separate the nodes locally and the wan factor represents the split +// between the two sides. +func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + split := nodes / 2 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rtt := lan + if (i <= split && j > split) || (i > split && j <= split) { + rtt += wan + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed +// around a circle with the given radius. The first node is at the "center" of the +// circle because it's equidistant from all the other nodes, but we place it at +// double the radius, so it should show up above all the other nodes in height. +func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). +func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i, _ := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. 
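+//
+// A sketch of a full phantom run using the helpers in this file (illustrative,
+// not upstream code):
+//
+//	clients, err := GenerateClients(25, DefaultConfig())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	truth := GenerateGrid(25, 10*time.Millisecond)
+//	Simulate(clients, truth, 1000)
+//	stats := Evaluate(clients, truth)
+//	_ = stats.ErrorAvg // mean relative error across all node pairs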
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
+	nodes := len(clients)
+	count := 0
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
+			actual := truth[i][j].Seconds()
+			error := math.Abs(est-actual) / actual
+			stats.ErrorMax = math.Max(stats.ErrorMax, error)
+			stats.ErrorAvg += error
+			count += 1
+		}
+	}
+
+	stats.ErrorAvg /= float64(count)
+	fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+	return
+}
diff --git a/vendor/github.com/hashicorp/terraform/CHANGELOG.md b/vendor/github.com/hashicorp/terraform/CHANGELOG.md
index 9a132758ca2..09a9269df2f 100644
--- a/vendor/github.com/hashicorp/terraform/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/terraform/CHANGELOG.md
@@ -1,12 +1,241 @@
-## 0.10.0 (Unreleased)
+## 0.11.0-beta1 (Unreleased)
 
-BUG FIXES:
+IMPROVEMENTS:
+
+* cli: The `terraform version` command now prints out the version numbers of initialized plugins as well as the version of Terraform core, so that they can be more easily shared when opening GitHub Issues, etc. [GH-16439]
+* helper/schema: Loosen validation for 'id' field [GH-16456]
+
+## 0.10.8 (October 25, 2017)
+
+NEW FEATURES:
+
+* **New `etcdv3` backend**, for use with the newer etcd API ([#15680](https://github.com/hashicorp/terraform/issues/15680))
+* **New interpolation function `chunklist`**, for splitting a list into a list of lists with specified sublist chunk sizes. ([#15112](https://github.com/hashicorp/terraform/issues/15112))
+
+IMPROVEMENTS:
+
+* backend/s3: Add options to skip AWS validation which allows non-AWS S3 backends ([#15553](https://github.com/hashicorp/terraform/issues/15553))
+
+BUG FIXES:
+
+* command/validate: Respect `-plugin-dir` overridden plugin paths in the `terraform validate` command. ([#15985](https://github.com/hashicorp/terraform/issues/15985))
+* provisioner/chef: Clean clients from `chef-vault` when `recreate_client` enabled ([#16357](https://github.com/hashicorp/terraform/issues/16357))
+* communicator/winrm: Support the `cacert` option for custom certificate authorities when provisioning over WinRM ([#14783](https://github.com/hashicorp/terraform/issues/14783))
+
+## 0.10.7 (October 2, 2017)
+
+NEW FEATURES:
+
+* Provider plugins can now optionally be cached in a shared directory to avoid re-downloading them for each configuration working directory. For more information, see [the documentation](https://github.com/hashicorp/terraform/blob/34956cd12449cb77db3f55e3286cd369e8332eeb/website/docs/configuration/providers.html.md#provider-plugin-cache). ([#16000](https://github.com/hashicorp/terraform/issues/16000))
+
+IMPROVEMENTS:
+
+* config: New `abs` interpolation function, returning the absolute value of a number ([#16168](https://github.com/hashicorp/terraform/issues/16168))
+* config: New `transpose` interpolation function, which swaps the keys and values in a map of lists of strings. ([#16192](https://github.com/hashicorp/terraform/issues/16192))
+* cli: The Terraform CLI now supports tab-completion for commands and certain arguments for `bash` and `zsh` users. See [the tab-completion docs](https://github.com/hashicorp/terraform/blob/2c782e60fad78e6fc976d850162322608f074e57/website/docs/commands/index.html.markdown#shell-tab-completion) for information on how to enable it. ([#16176](https://github.com/hashicorp/terraform/issues/16176))
+* cli: `terraform state rm` now includes in its output the count of resources that were removed from the state.
([#16137](https://github.com/hashicorp/terraform/issues/16137)) + +BUG FIXES: + +* modules: Update go-getter to fix crash when fetching invalid source subdir ([#16161](https://github.com/hashicorp/terraform/issues/16161)) +* modules: Fix regression in the handling of modules sourcing other modules with relative paths ([#16160](https://github.com/hashicorp/terraform/issues/16160)) +* core: Skip local value interpolation during destroy ([#16213](https://github.com/hashicorp/terraform/issues/16213)) + +## 0.10.6 (September 19, 2017) + +UPGRADE NOTES: + +* The internal storage of modules has changed in this release, so after + upgrading `terraform init` must be run to re-install modules in the new + on-disk format. The existing installed versions of modules will be ignored, + so the latest version of each module will be installed. + +IMPROVEMENTS: + +* Modules can now be installed from [the Terraform Registry](https://registry.terraform.io/) +* cli: `terraform import` now accepts an option `-allow-missing-config` that overrides the default requirement that a configuration block must already be present for the resource being imported. ([#15876](https://github.com/hashicorp/terraform/issues/15876)) + +## 0.10.5 (September 14, 2017) + +NEW FEATURES: + +* config: `indent` interpolation function appends spaces to all but the first line of a multi-line string ([#15311](https://github.com/hashicorp/terraform/issues/15311)) + +IMPROVEMENTS: + +* cli: `terraform fmt` has a new option `-check` which makes it return a non-zero exit status if any formatting changes are required ([#15387](https://github.com/hashicorp/terraform/issues/15387)) +* cli: When [running Terraform in automation](https://www.terraform.io/guides/running-terraform-in-automation.html), a new environment variable `TF_IN_AUTOMATION` can be used to disable or adjust certain prompts that would normally include specific CLI commands to run. This assumes that the wrapping automation tool is providing its own UI for guiding the user through the workflow, and thus the standard advice would be redundant and/or confusing. ([#16059](https://github.com/hashicorp/terraform/issues/16059)) + +BUG FIXES: + +* cli: restore the "(forces new resource)" annotations on attributes that were inadvertently disabled in 0.10.4. 
([#16067](https://github.com/hashicorp/terraform/issues/16067))
+* cli: fix regression with installing modules from git when the `GIT_SSH_COMMAND` environment variable is set ([#16099](https://github.com/hashicorp/terraform/issues/16099))
+
+## 0.10.4 (September 6, 2017)
+
+IMPROVEMENTS:
+* `terraform apply` now uses the standard resource address syntax to refer to resources in its log ([#15884](https://github.com/hashicorp/terraform/issues/15884))
+* `terraform plan` output has some minor adjustments to improve readability and accessibility for those who can't see its colors ([#15884](https://github.com/hashicorp/terraform/issues/15884))
+
+BUG FIXES:
+
+* backend/consul: fix crash during consul backend initialization ([#15976](https://github.com/hashicorp/terraform/issues/15976))
+* backend/azurerm: ensure that blob storage metadata is preserved when updating state blobs, to avoid losing track of lock metadata ([#16015](https://github.com/hashicorp/terraform/issues/16015))
+* config: local values now work properly in resource `count` and in modules with more than one `.tf` file ([#15995](https://github.com/hashicorp/terraform/issues/15995), [#15982](https://github.com/hashicorp/terraform/issues/15982))
+* cli: removed some inconsistencies in how data sources are counted and tallied in plan vs. apply and apply vs. destroy. In particular, data sources are no longer incorrectly counted as destroyed in `terraform destroy` ([#15884](https://github.com/hashicorp/terraform/issues/15884))
+
+## 0.10.3 (August 30, 2017)
+
+BACKWARDS INCOMPATIBILITIES / NOTES:
+
+* LGPL Dependencies Removed ([#15862](https://github.com/hashicorp/terraform/issues/15862))
+
+NEW FEATURES:
+
+* **Local Values**: this new configuration language feature allows assigning a symbolic local name to an expression so it can be used multiple times in configuration without repetition. See [the documentation](https://github.com/hashicorp/terraform/blob/master/website/docs/configuration/locals.html.md) for how to define and use local values. ([#15449](https://github.com/hashicorp/terraform/issues/15449))
+* **`base64gzip` interpolation function**: compresses a string with gzip and then base64-encodes the result ([#3858](https://github.com/hashicorp/terraform/issues/3858))
+* **`flatten` interpolation function**: turns a list of lists, or list of lists of lists, etc. into a flat list of primitive values ([#15278](https://github.com/hashicorp/terraform/issues/15278))
+* **`urlencode` interpolation function**: applies standard URL encoding to a string so that it can be embedded in a URL without making it invalid and without any of the characters being interpreted as part of the URL structure ([#15871](https://github.com/hashicorp/terraform/issues/15871))
+* **`salt-masterless` provisioner**: runs Salt in masterless mode on a target server ([#14720](https://github.com/hashicorp/terraform/issues/14720))
+
+IMPROVEMENTS:
+
+* config: The `jsonencode` interpolation function now accepts nested list and map structures, where before it would accept only strings, lists of strings, and maps of strings. ([#14884](https://github.com/hashicorp/terraform/issues/14884))
+* cli: The "creation complete" (and similar) messages from `terraform apply` now include a total elapsed time for each operation. ([#15548](https://github.com/hashicorp/terraform/issues/15548))
+* cli: Module installation (with either `terraform init` or `terraform get`) now detects and recursively initializes submodules when the source is a git repository.
([#15891](https://github.com/hashicorp/terraform/issues/15891)) +* cli: Modules can now be installed from `.tar.xz` archives, in addition to the existing `.tar.gz`, `.tar.bz2` and `.zip`. ([#15891](https://github.com/hashicorp/terraform/issues/15891)) +* provisioner/local-exec: now possible to specify a custom "interpreter", overriding the default of either `bash -c` (on Unix) or `cmd.exe /C` (on Windows) ([#15166](https://github.com/hashicorp/terraform/issues/15166)) +* backend/consul: can now set the path to a specific CA certificate file, client certificate file, and client key file that will be used when configuring the underlying Consul client. ([#15405](https://github.com/hashicorp/terraform/issues/15405)) +* backend/http: now has optional support for locking, with special support from the target server. Additionally, the update operation can now optionally be implemented via `PUT` rather than `POST`. ([#15793](https://github.com/hashicorp/terraform/issues/15793)) +* helper/resource: Add `TestStep.SkipFunc` ([#15957](https://github.com/hashicorp/terraform/issues/15957)) + +BUG FIXES: + +* cli: `terraform init` now verifies the required Terraform version from the root module config. Previously this was verified only on subsequent commands, after initialization. ([#15935](https://github.com/hashicorp/terraform/issues/15935)) +* cli: `terraform validate` now consults `terraform.tfvars`, if present, to set variable values. This is now consistent with the behavior of other commands. ([#15938](https://github.com/hashicorp/terraform/issues/15938)) + +## 0.10.2 (August 16, 2017) + +BUG FIXES: + +* tools/terraform-bundle: Add missing Ui to ProviderInstaller (fix crash) ([#15826](https://github.com/hashicorp/terraform/issues/15826)) +* go-plugin: don't crash when server emits non-key-value JSON ([go-plugin#43](https://github.com/hashicorp/go-plugin/pull/43)) + +## 0.10.1 (August 15, 2017) + +BUG FIXES: + +* Fix `terraform state rm` and `mv` commands to work correctly with remote state backends ([#15652](https://github.com/hashicorp/terraform/issues/15652)) +* Fix errors when interpolations fail during input ([#15780](https://github.com/hashicorp/terraform/issues/15780)) +* Backoff retries in remote-execution provisioner ([#15772](https://github.com/hashicorp/terraform/issues/15772)) +* Load plugins from `~/.terraform.d/plugins/OS_ARCH/` and `.terraformrc` ([#15769](https://github.com/hashicorp/terraform/issues/15769)) +* The `import` command was ignoring the remote state configuration ([#15768](https://github.com/hashicorp/terraform/issues/15768)) +* Don't allow leading slashes in s3 bucket names for remote state ([#15738](https://github.com/hashicorp/terraform/issues/15738)) + +IMPROVEMENTS: + +* helper/schema: Add `GetOkExists` schema function ([#15723](https://github.com/hashicorp/terraform/issues/15723)) +* helper/schema: Make 'id' a reserved field name ([#15695](https://github.com/hashicorp/terraform/issues/15695)) +* command/init: Display version + source when initializing plugins ([#15804](https://github.com/hashicorp/terraform/issues/15804)) + +INTERNAL CHANGES: + +* DiffFieldReader.ReadField caches results to optimize deeply nested schemas ([#15663](https://github.com/hashicorp/terraform/issues/15663)) + + +## 0.10.0 (August 2, 2017) + +**This is the complete 0.9.11 to 0.10.0 CHANGELOG** + +BACKWARDS INCOMPATIBILITIES / NOTES: + +* A new flag `-auto-approve` has been added to `terraform apply`. 
This flag controls whether an interactive approval is applied before + making the changes in the plan. For now this flag defaults to `true` to preserve previous behavior, but this will become the new default + in a future version. We suggest that anyone running `terraform apply` in wrapper scripts or automation refer to the upgrade guide to learn + how to prepare such wrapper scripts for the later breaking change. +* The `validate` command now checks that all variables are specified by default. The validation will fail by default if that's not the + case. ([#13872](https://github.com/hashicorp/terraform/issues/13872)) +* `terraform state rm` now requires at least one argument. Previously, calling it with no arguments would remove all resources from state, + which is consistent with the other `terraform state` commands but unlikely enough that we considered it better to be inconsistent here to + reduce the risk of accidentally destroying the state. +* Terraform providers are no longer distributed as part of the main Terraform distribution. Instead, they are installed automatically as + part of running `terraform init`. It is therefore now mandatory to run `terraform init` before any other operations that use provider + plugins, to ensure that the required plugins are installed and properly initialized. +* The `terraform env` family of commands have been renamed to `terraform workspace`, in response to feedback that the previous naming was + confusing due to collisions with other concepts of the same name. The commands still work the same as they did before, and the `env` + subcommand is still supported as an alias for backward compatibility. The `env` subcommand will be removed altogether in a future release, + so it's recommended to update any automation or wrapper scripts that use these commands. +* The `terraform init` subcommand no longer takes a SOURCE argument to copy to the current directory. The behavior has been changed to match + that of `plan` and `apply`, so that a configuration can be provided as an argument on the commandline while initializing the current + directory. If a module needs to be copied into the current directory before initialization, it will have to be done manually. +* The `-target` option available on several Terraform subcommands has changed behavior and **now matches potentially more resources**. In + particular, given an option `-target=module.foo`, resources in any descendent modules of `foo` will also be targeted, where before this + was not true. After upgrading, be sure to look carefully at the set of changes proposed by `terraform plan` when using `-target` to ensure + that the target is being interpreted as expected. Note that the `-target` argument is offered for exceptional circumstances only and is + not intended for routine use. +* The `import` command requires that imported resources be specified in the configuration file. Previously, users were encouraged to import + a resource and _then_ write the configuration block for it. This creates the risk that users could import a resource and subsequently + create no configuration for it, which results in Terraform deleting the resource. If the imported resource is not present in the + configuration file, the `import` command will fail. + +FEATURES: + +* **Separate Provider Releases:** Providers are now released independently from Terraform. +* **Automatic Provider Installation:** The required providers will be automatically installed during `terraform init`. 
+* **Provider Constraints:** Providers are now versioned, and version constraints may be declared in the configuration.
+
+PROVIDERS:
+
+* Providers now maintain their own CHANGELOGs in their respective repositories: [terraform-providers](https://github.com/terraform-providers)
+
+IMPROVEMENTS:
+
+* cli: Add a `-from-module` flag to `terraform init` to re-introduce the legacy `terraform init` behavior of fetching a module. ([#15666](https://github.com/hashicorp/terraform/issues/15666))
+* backend/s3: Add `workspace_key_prefix` to allow a user-configurable prefix for workspaces in the S3 Backend. ([#15370](https://github.com/hashicorp/terraform/issues/15370))
+* cli: `terraform apply` now has an option `-auto-approve=false` that produces an interactive prompt to approve the generated plan. This will become the default workflow in a future Terraform version. ([#7251](https://github.com/hashicorp/terraform/issues/7251))
+* cli: `terraform workspace show` command prints the current workspace name in a way that's more convenient for processing in wrapper scripts. ([#15157](https://github.com/hashicorp/terraform/issues/15157))
+* cli: `terraform state rm` will now generate an error if no arguments are passed, whereas before it treated it as an open resource address selecting _all_ resources ([#15283](https://github.com/hashicorp/terraform/issues/15283))
+* cli: Files in the config directory ending in `.auto.tfvars` are now loaded automatically (in lexicographical order) in addition to the single `terraform.tfvars` file that auto-loaded previously. ([#13306](https://github.com/hashicorp/terraform/issues/13306))
+* Providers no longer in the main Terraform distribution; installed automatically by init instead ([#15208](https://github.com/hashicorp/terraform/issues/15208))
+* cli: `terraform env` command renamed to `terraform workspace` ([#14952](https://github.com/hashicorp/terraform/issues/14952))
+* cli: `terraform init` command now has `-upgrade` option to download the latest versions (within specified constraints) of modules and provider plugins.
+* cli: The `-target` option to various Terraform operations can now target resources in descendent modules. ([#15314](https://github.com/hashicorp/terraform/issues/15314))
+* cli: Minor updates to `terraform plan` output: use standard resource address syntax, more visually-distinct `-/+` actions, and more ([#15362](https://github.com/hashicorp/terraform/issues/15362))
+* config: New interpolation function `contains`, to find if a given string exists in a list of strings. ([#15322](https://github.com/hashicorp/terraform/issues/15322))
+
+BUG FIXES:
+
+* provisioner/chef: fix panic ([#15617](https://github.com/hashicorp/terraform/issues/15617))
+* Don't show a message about the path to the state file if the state is remote ([#15435](https://github.com/hashicorp/terraform/issues/15435))
+* Fix crash when `terraform graph` is run with no configuration ([#15588](https://github.com/hashicorp/terraform/issues/15588))
+* Handle correctly the `.exe` suffix on locally-compiled provider plugins on Windows systems.
([#15587](https://github.com/hashicorp/terraform/issues/15587))
+* config: Fixed a parsing issue in the interpolation language HIL that was causing misinterpretation of literal strings ending with escaped backslashes ([#15415](https://github.com/hashicorp/terraform/issues/15415))
+* core: the S3 Backend was failing to remove the state file checksums from DynamoDB when deleting a workspace ([#15383](https://github.com/hashicorp/terraform/issues/15383))
+* core: Improved resilience against crashes for a certain kind of inconsistency in the representation of list values in state. ([#15390](https://github.com/hashicorp/terraform/issues/15390))
+* core: Display correct to and from backends in copy message when migrating to new remote state ([#15318](https://github.com/hashicorp/terraform/issues/15318))
+* core: Fix a regression from 0.9.6 that was causing the tally of resources to create to be double-counted sometimes in the plan output ([#15344](https://github.com/hashicorp/terraform/issues/15344))
+* cli: the state `rm` and `mv` commands were always loading a state from a Backend, and ignoring the `-state` flag ([#15388](https://github.com/hashicorp/terraform/issues/15388))
+* cli: certain prompts in `terraform init` were respecting `-input=false` but not the `TF_INPUT` environment variable ([#15391](https://github.com/hashicorp/terraform/issues/15391))
+* state: Further work, building on [#15423](https://github.com/hashicorp/terraform/issues/15423), to improve the internal design of the state managers to make this code more maintainable and reduce the risk of regressions; this may lead to slight changes to the number of times Terraform writes to remote state and how the serial is implemented with respect to those writes, which does not affect outward functionality but is worth noting if you log or inspect state updates for debugging purposes.
+* config: Interpolation function `cidrhost` was not correctly calculating host addresses under IPv6 CIDR prefixes ([#15321](https://github.com/hashicorp/terraform/issues/15321))
+* provisioner/chef: Prevent a panic while trying to read the connection info ([#15271](https://github.com/hashicorp/terraform/issues/15271))
+* provisioner/file: Refactor the provisioner validation function to prevent false positives ([#15273](https://github.com/hashicorp/terraform/issues/15273))
+
+INTERNAL CHANGES:
+
+* helper/schema: Actively disallow reserved field names in schema ([#15522](https://github.com/hashicorp/terraform/issues/15522))
+* helper/schema: Force field names to be alphanum lowercase + underscores ([#15562](https://github.com/hashicorp/terraform/issues/15562))
+
+
+## 0.10.0-rc1 to 0.10.0 (August 2, 2017)
+
+BUG FIXES:
-* provisioner/chef: fix panic [GH-15617]
+* provisioner/chef: fix panic ([#15617](https://github.com/hashicorp/terraform/issues/15617))
 
 IMPROVEMENTS:
 
-* cli: Add a `-from-module` flag to `terraform init` to re-introduce the legacy `terraform init` behavior of fetching a module. [GH-15666]
+* cli: Add a `-from-module` flag to `terraform init` to re-introduce the legacy `terraform init` behavior of fetching a module.
([#15666](https://github.com/hashicorp/terraform/issues/15666)) ## 0.10.0-rc1 (July 19, 2017) diff --git a/vendor/github.com/hashicorp/terraform/Dockerfile b/vendor/github.com/hashicorp/terraform/Dockerfile index d435c9f91fd..3863822cbd0 100644 --- a/vendor/github.com/hashicorp/terraform/Dockerfile +++ b/vendor/github.com/hashicorp/terraform/Dockerfile @@ -14,6 +14,7 @@ MAINTAINER "HashiCorp Terraform Team " RUN apk add --update git bash openssh ENV TF_DEV=true +ENV TF_RELEASE=1 WORKDIR $GOPATH/src/github.com/hashicorp/terraform COPY . . diff --git a/vendor/github.com/hashicorp/terraform/Makefile b/vendor/github.com/hashicorp/terraform/Makefile index c51b77e1d13..07897daacdc 100644 --- a/vendor/github.com/hashicorp/terraform/Makefile +++ b/vendor/github.com/hashicorp/terraform/Makefile @@ -1,4 +1,4 @@ -TEST?=$$(go list ./... | grep -v '/terraform/vendor/' | grep -v '/builtin/bins/') +TEST?=./... GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) default: test vet @@ -27,10 +27,11 @@ plugin-dev: generate mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN) # test runs the unit tests +# we run this one package at a time here because running the entire suite in +# one command creates memory usage issues when running in Travis-CI. test: fmtcheck generate go test -i $(TEST) || exit 1 - echo $(TEST) | \ - xargs -t -n4 go test $(TESTARGS) -timeout=60s -parallel=4 + go list $(TEST) | xargs -t -n4 go test $(TESTARGS) -timeout=60s -parallel=4 # testacc runs acceptance tests testacc: fmtcheck generate @@ -41,6 +42,12 @@ testacc: fmtcheck generate fi TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m +# e2etest runs the end-to-end tests against a generated Terraform binary +# The TF_ACC here allows network access, but does not require any special +# credentials since the e2etests use local-only providers such as "null". +e2etest: generate + TF_ACC=1 go test -v ./command/e2etest + test-compile: fmtcheck generate @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package. For example,"; \ @@ -64,8 +71,8 @@ cover: # vet runs the Go source code static analysis tool `vet` to find # any common errors. vet: - @echo 'go vet $$(go list ./... | grep -v /terraform/vendor/)' - @go vet $$(go list ./... | grep -v /terraform/vendor/) ; if [ $$? -eq 1 ]; then \ + @echo 'go vet ./...' + @go vet ./... ; if [ $$? -eq 1 ]; then \ echo ""; \ echo "Vet found suspicious constructs. Please check the reported constructs"; \ echo "and fix them if necessary before submitting the code for review."; \ @@ -78,7 +85,7 @@ generate: @which stringer > /dev/null; if [ $$? -ne 0 ]; then \ go get -u golang.org/x/tools/cmd/stringer; \ fi - go generate $$(go list ./... | grep -v /terraform/vendor/) + go generate ./... @go fmt command/internal_plugin_list.go > /dev/null fmt: @@ -95,4 +102,4 @@ vendor-status: # under parallel conditions. 
.NOTPARALLEL:
 
-.PHONY: bin cover default dev fmt fmtcheck generate plugin-dev quickdev test-compile test testacc testrace tools vendor-status vet
+.PHONY: bin cover default dev e2etest fmt fmtcheck generate plugin-dev quickdev test-compile test testacc testrace tools vendor-status vet
diff --git a/vendor/github.com/hashicorp/terraform/README.md b/vendor/github.com/hashicorp/terraform/README.md
index fc3810e0eaf..03f8be85dd9 100644
--- a/vendor/github.com/hashicorp/terraform/README.md
+++ b/vendor/github.com/hashicorp/terraform/README.md
@@ -6,7 +6,7 @@ Terraform
 [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_shield)
 - Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
 
-![Terraform](https://www.terraform.io/assets/images/logo-hashicorp.svg)
+Terraform
 
 Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
@@ -25,12 +25,17 @@ For more information, see the [introduction section](http://www.terraform.io/int
 Getting Started & Documentation
 -------------------------------
 
-All documentation is available on the [Terraform website](http://www.terraform.io).
+If you're new to Terraform and want to get started creating infrastructure, please check out our [Getting Started](https://www.terraform.io/intro/getting-started/install.html) guide, available on the [Terraform website](http://www.terraform.io).
+
+All documentation is available on the [Terraform website](http://www.terraform.io):
+
+  - [Intro](https://www.terraform.io/intro/index.html)
+  - [Docs](https://www.terraform.io/docs/index.html)
 
 Developing Terraform
 --------------------
 
-If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.9+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
 
 This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html).
@@ -163,4 +168,4 @@ docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/gith ## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=large)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_large) \ No newline at end of file +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=large)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_large) diff --git a/vendor/github.com/hashicorp/terraform/Vagrantfile b/vendor/github.com/hashicorp/terraform/Vagrantfile index 3683318686e..d140efb9188 100644 --- a/vendor/github.com/hashicorp/terraform/Vagrantfile +++ b/vendor/github.com/hashicorp/terraform/Vagrantfile @@ -5,7 +5,7 @@ VAGRANTFILE_API_VERSION = "2" # Software version variables -GOVERSION = "1.8.1" +GOVERSION = "1.9" UBUNTUVERSION = "16.04" # CPU and RAM can be adjusted depending on your system diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go b/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go new file mode 100644 index 00000000000..660327ae025 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go @@ -0,0 +1,163 @@ +package atlas + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + "sync" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" +) + +// Backend is an implementation of EnhancedBackend that performs all operations +// in Atlas. State must currently also be stored in Atlas, although it is worth +// investigating in the future if state storage can be external as well. +type Backend struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ContextOpts are the base context options to set when initializing a + // Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. 
+	ContextOpts *terraform.ContextOpts
+
+	//---------------------------------------------------------------
+	// Internal fields, do not set
+	//---------------------------------------------------------------
+	// stateClient is the legacy state client, set up in Configure
+	stateClient *stateClient
+
+	// schema is the schema for configuration, set by init
+	schema *schema.Backend
+	once   sync.Once
+
+	// opLock locks operations
+	opLock sync.Mutex
+}
+
+func (b *Backend) Input(
+	ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+	b.once.Do(b.init)
+	return b.schema.Input(ui, c)
+}
+
+func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+	b.once.Do(b.init)
+	return b.schema.Validate(c)
+}
+
+func (b *Backend) Configure(c *terraform.ResourceConfig) error {
+	b.once.Do(b.init)
+	return b.schema.Configure(c)
+}
+
+func (b *Backend) States() ([]string, error) {
+	return nil, backend.ErrNamedStatesNotSupported
+}
+
+func (b *Backend) DeleteState(name string) error {
+	return backend.ErrNamedStatesNotSupported
+}
+
+func (b *Backend) State(name string) (state.State, error) {
+	if name != backend.DefaultStateName {
+		return nil, backend.ErrNamedStatesNotSupported
+	}
+
+	return &remote.State{Client: b.stateClient}, nil
+}
+
+// Colorize returns the Colorize structure that can be used for colorizing
+// output. This is guaranteed to always return a non-nil value and so is useful
+// as a helper to wrap any potentially colored strings.
+func (b *Backend) Colorize() *colorstring.Colorize {
+	if b.CLIColor != nil {
+		return b.CLIColor
+	}
+
+	return &colorstring.Colorize{
+		Colors:  colorstring.DefaultColors,
+		Disable: true,
+	}
+}
+
+func (b *Backend) init() {
+	b.schema = &schema.Backend{
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: schemaDescriptions["name"],
+			},
+
+			"access_token": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: schemaDescriptions["access_token"],
+				DefaultFunc: schema.EnvDefaultFunc("ATLAS_TOKEN", nil),
+			},
+
+			"address": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: schemaDescriptions["address"],
+				DefaultFunc: schema.EnvDefaultFunc("ATLAS_ADDRESS", defaultAtlasServer),
+			},
+		},
+
+		ConfigureFunc: b.schemaConfigure,
+	}
+}
+
+func (b *Backend) schemaConfigure(ctx context.Context) error {
+	d := schema.FromContextBackendConfig(ctx)
+
+	// Parse the address
+	addr := d.Get("address").(string)
+	addrUrl, err := url.Parse(addr)
+	if err != nil {
+		return fmt.Errorf("Error parsing 'address': %s", err)
+	}
+
+	// Parse the org/env
+	name := d.Get("name").(string)
+	parts := strings.Split(name, "/")
+	if len(parts) != 2 {
+		return fmt.Errorf("malformed name '%s', expected format '<org>/<env>'", name)
+	}
+	org := parts[0]
+	env := parts[1]
+
+	// Setup the client
+	b.stateClient = &stateClient{
+		Server:      addr,
+		ServerURL:   addrUrl,
+		AccessToken: d.Get("access_token").(string),
+		User:        org,
+		Name:        env,
+
+		// This is optionally set during Atlas Terraform runs.
+		RunId: os.Getenv("ATLAS_RUN_ID"),
+	}
+
+	return nil
+}
+
+var schemaDescriptions = map[string]string{
+	"name": "Full name of the environment in Atlas, such as 'hashicorp/myenv'",
+	"access_token": "Access token to use to access Atlas. If ATLAS_TOKEN is set then\n" +
+		"this will override any saved value for this.",
+	"address": "Address to your Atlas installation.
This defaults to the publicly\n" + + "hosted version at 'https://atlas.hashicorp.com/'. This address\n" + + "should contain the full HTTP scheme to use.", +} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go b/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go new file mode 100644 index 00000000000..5b3656eff3f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go @@ -0,0 +1,13 @@ +package atlas + +import ( + "github.com/hashicorp/terraform/backend" +) + +// backend.CLI impl. +func (b *Backend) CLIInit(opts *backend.CLIOpts) error { + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ContextOpts = opts.ContextOpts + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go b/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go new file mode 100644 index 00000000000..e49cb719241 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go @@ -0,0 +1,319 @@ +package atlas + +import ( + "bytes" + "crypto/md5" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + // defaultAtlasServer is used when no address is given + defaultAtlasServer = "https://atlas.hashicorp.com/" + atlasTokenHeader = "X-Atlas-Token" +) + +// AtlasClient implements the Client interface for an Atlas compatible server. +type stateClient struct { + Server string + ServerURL *url.URL + User string + Name string + AccessToken string + RunId string + HTTPClient *retryablehttp.Client + + conflictHandlingAttempted bool +} + +func (c *stateClient) Get() (*remote.Payload, error) { + // Make the HTTP request + req, err := retryablehttp.NewRequest("GET", c.url().String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to make HTTP request: %v", err) + } + + req.Header.Set(atlasTokenHeader, c.AccessToken) + + // Request the url + client, err := c.http() + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Handle the common status codes + switch resp.StatusCode { + case http.StatusOK: + // Handled after + case http.StatusNoContent: + return nil, nil + case http.StatusNotFound: + return nil, nil + case http.StatusUnauthorized: + return nil, fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusInternalServerError: + return nil, fmt.Errorf("HTTP remote state internal server error") + default: + return nil, fmt.Errorf( + "Unexpected HTTP response code: %d\n\nBody: %s", + resp.StatusCode, c.readBody(resp.Body)) + } + + // Read in the body + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, resp.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %v", err) + } + + // Create the payload + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + if len(payload.Data) == 0 { + return nil, nil + } + + // Check for the MD5 + if raw := resp.Header.Get("Content-MD5"); raw != "" { + md5, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf("Failed to decode Content-MD5 '%s': %v", raw, err) + } + + payload.MD5 = md5 + } else { + // Generate the MD5 + hash := 
md5.Sum(payload.Data)
+ payload.MD5 = hash[:]
+ }
+
+ return payload, nil
+}
+
+func (c *stateClient) Put(state []byte) error {
+ // Get the target URL
+ base := c.url()
+
+ // Generate the MD5
+ hash := md5.Sum(state)
+ b64 := base64.StdEncoding.EncodeToString(hash[:])
+
+ // Make the HTTP client and request
+ req, err := retryablehttp.NewRequest("PUT", base.String(), bytes.NewReader(state))
+ if err != nil {
+ return fmt.Errorf("Failed to make HTTP request: %v", err)
+ }
+
+ // Prepare the request
+ req.Header.Set(atlasTokenHeader, c.AccessToken)
+ req.Header.Set("Content-MD5", b64)
+ req.Header.Set("Content-Type", "application/json")
+ req.ContentLength = int64(len(state))
+
+ // Make the request
+ client, err := c.http()
+ if err != nil {
+ return err
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return fmt.Errorf("Failed to upload state: %v", err)
+ }
+ defer resp.Body.Close()
+
+ // Handle the error codes
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return nil
+ case http.StatusConflict:
+ return c.handleConflict(c.readBody(resp.Body), state)
+ default:
+ return fmt.Errorf(
+ "HTTP error: %d\n\nBody: %s",
+ resp.StatusCode, c.readBody(resp.Body))
+ }
+}
+
+func (c *stateClient) Delete() error {
+ // Make the HTTP request
+ req, err := retryablehttp.NewRequest("DELETE", c.url().String(), nil)
+ if err != nil {
+ return fmt.Errorf("Failed to make HTTP request: %v", err)
+ }
+ req.Header.Set(atlasTokenHeader, c.AccessToken)
+
+ // Make the request
+ client, err := c.http()
+ if err != nil {
+ return err
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return fmt.Errorf("Failed to delete state: %v", err)
+ }
+ defer resp.Body.Close()
+
+ // Handle the error codes
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return nil
+ case http.StatusNoContent:
+ return nil
+ case http.StatusNotFound:
+ return nil
+ default:
+ return fmt.Errorf(
+ "HTTP error: %d\n\nBody: %s",
+ resp.StatusCode, c.readBody(resp.Body))
+ }
+}
+
+func (c *stateClient) readBody(b io.Reader) string {
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, b); err != nil {
+ return fmt.Sprintf("Error reading body: %s", err)
+ }
+
+ result := buf.String()
+ if result == "" {
+ result = "<empty>"
+ }
+
+ return result
+}
+
+func (c *stateClient) url() *url.URL {
+ values := url.Values{}
+
+ values.Add("atlas_run_id", c.RunId)
+
+ return &url.URL{
+ Scheme: c.ServerURL.Scheme,
+ Host: c.ServerURL.Host,
+ Path: path.Join("api/v1/terraform/state", c.User, c.Name),
+ RawQuery: values.Encode(),
+ }
+}
+
+func (c *stateClient) http() (*retryablehttp.Client, error) {
+ if c.HTTPClient != nil {
+ return c.HTTPClient, nil
+ }
+ tlsConfig := &tls.Config{}
+ err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+ CAFile: os.Getenv("ATLAS_CAFILE"),
+ CAPath: os.Getenv("ATLAS_CAPATH"),
+ })
+ if err != nil {
+ return nil, err
+ }
+ rc := retryablehttp.NewClient()
+
+ rc.CheckRetry = func(resp *http.Response, err error) (bool, error) {
+ if err != nil {
+ // don't bother retrying if the certs don't match
+ if err, ok := err.(*url.Error); ok {
+ if _, ok := err.Err.(x509.UnknownAuthorityError); ok {
+ return false, nil
+ }
+ }
+ // continue retrying
+ return true, nil
+ }
+ return retryablehttp.DefaultRetryPolicy(resp, err)
+ }
+
+ t := cleanhttp.DefaultTransport()
+ t.TLSClientConfig = tlsConfig
+ rc.HTTPClient.Transport = t
+
+ c.HTTPClient = rc
+ return rc, nil
+}
+
+// Atlas returns an HTTP 409 - Conflict if the pushed state reports the same
+// Serial number but the checksum of the raw content differs. 
This can +// sometimes happen when Terraform changes state representation internally +// between versions in a way that's semantically neutral but affects the JSON +// output and therefore the checksum. +// +// Here we detect and handle this situation by ticking the serial and retrying +// iff for the previous state and the proposed state: +// +// * the serials match +// * the parsed states are Equal (semantically equivalent) +// +// In other words, in this situation Terraform can override Atlas's detected +// conflict by asserting that the state it is pushing is indeed correct. +func (c *stateClient) handleConflict(msg string, state []byte) error { + log.Printf("[DEBUG] Handling Atlas conflict response: %s", msg) + + if c.conflictHandlingAttempted { + log.Printf("[DEBUG] Already attempted conflict resolution; returning conflict.") + } else { + c.conflictHandlingAttempted = true + log.Printf("[DEBUG] Atlas reported conflict, checking for equivalent states.") + + payload, err := c.Get() + if err != nil { + return conflictHandlingError(err) + } + + currentState, err := terraform.ReadState(bytes.NewReader(payload.Data)) + if err != nil { + return conflictHandlingError(err) + } + + proposedState, err := terraform.ReadState(bytes.NewReader(state)) + if err != nil { + return conflictHandlingError(err) + } + + if statesAreEquivalent(currentState, proposedState) { + log.Printf("[DEBUG] States are equivalent, incrementing serial and retrying.") + proposedState.Serial++ + var buf bytes.Buffer + if err := terraform.WriteState(proposedState, &buf); err != nil { + return conflictHandlingError(err) + + } + return c.Put(buf.Bytes()) + } else { + log.Printf("[DEBUG] States are not equivalent, returning conflict.") + } + } + + return fmt.Errorf( + "Atlas detected a remote state conflict.\n\nMessage: %s", msg) +} + +func conflictHandlingError(err error) error { + return fmt.Errorf( + "Error while handling a conflict response from Atlas: %s", err) +} + +func statesAreEquivalent(current, proposed *terraform.State) bool { + return current.Serial == proposed.Serial && current.Equal(proposed) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/backend.go b/vendor/github.com/hashicorp/terraform/backend/backend.go new file mode 100644 index 00000000000..efa42d4b6fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/backend.go @@ -0,0 +1,169 @@ +// Package backend provides interfaces that the CLI uses to interact with +// Terraform. A backend provides the abstraction that allows the same CLI +// to simultaneously support both local and remote operations for seamlessly +// using Terraform in a team environment. +package backend + +import ( + "context" + "errors" + "time" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// This is the name of the default, initial state that every backend +// must have. This state cannot be deleted. +const DefaultStateName = "default" + +// Error value to return when a named state operation isn't supported. +// This must be returned rather than a custom error so that the Terraform +// CLI can detect it and handle it appropriately. +var ErrNamedStatesNotSupported = errors.New("named states not supported") + +// Backend is the minimal interface that must be implemented to enable Terraform. +type Backend interface { + // Ask for input and configure the backend. Similar to + // terraform.ResourceProvider. 
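+ //
+ // The CLI typically drives these in order: Input fills in missing
+ // configuration, Validate checks it, and Configure applies it (compare
+ // the once-guarded implementations in the atlas backend above).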
+ Input(terraform.UIInput, *terraform.ResourceConfig) (*terraform.ResourceConfig, error) + Validate(*terraform.ResourceConfig) ([]string, []error) + Configure(*terraform.ResourceConfig) error + + // State returns the current state for this environment. This state may + // not be loaded locally: the proper APIs should be called on state.State + // to load the state. If the state.State is a state.Locker, it's up to the + // caller to call Lock and Unlock as needed. + // + // If the named state doesn't exist it will be created. The "default" state + // is always assumed to exist. + State(name string) (state.State, error) + + // DeleteState removes the named state if it exists. It is an error + // to delete the default state. + // + // DeleteState does not prevent deleting a state that is in use. It is the + // responsibility of the caller to hold a Lock on the state when calling + // this method. + DeleteState(name string) error + + // States returns a list of configured named states. + States() ([]string, error) +} + +// Enhanced implements additional behavior on top of a normal backend. +// +// Enhanced backends allow customizing the behavior of Terraform operations. +// This allows Terraform to potentially run operations remotely, load +// configurations from external sources, etc. +type Enhanced interface { + Backend + + // Operation performs a Terraform operation such as refresh, plan, apply. + // It is up to the implementation to determine what "performing" means. + // This DOES NOT BLOCK. The context returned as part of RunningOperation + // should be used to block for completion. + // If the state used in the operation can be locked, it is the + // responsibility of the Backend to lock the state for the duration of the + // running operation. + Operation(context.Context, *Operation) (*RunningOperation, error) +} + +// Local implements additional behavior on a Backend that allows local +// operations in addition to remote operations. +// +// This enables more behaviors of Terraform that require more data such +// as `console`, `import`, `graph`. These require direct access to +// configurations, variables, and more. Not all backends may support this +// so we separate it out into its own optional interface. +type Local interface { + // Context returns a runnable terraform Context. The operation parameter + // doesn't need a Type set but it needs other options set such as Module. + Context(*Operation) (*terraform.Context, state.State, error) +} + +// An operation represents an operation for Terraform to execute. +// +// Note that not all fields are supported by all backends and can result +// in an error if set. All backend implementations should show user-friendly +// errors explaining any incorrectly set values. For example, the local +// backend doesn't support a PlanId being set. +// +// The operation options are purposely designed to have maximal compatibility +// between Terraform and Terraform Servers (a commercial product offered by +// HashiCorp). Therefore, it isn't expected that other implementation support +// every possible option. The struct here is generalized in order to allow +// even partial implementations to exist in the open, without walling off +// remote functionality 100% behind a commercial wall. Anyone can implement +// against this interface and have Terraform interact with it just as it +// would with HashiCorp-provided Terraform Servers. +type Operation struct { + // Type is the operation to perform. 
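+ // For the local backend this is one of OperationTypeRefresh,
+ // OperationTypePlan, or OperationTypeApply (see Local.Operation).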
+ Type OperationType
+
+ // PlanId is an opaque value that backends can use to execute a specific
+ // plan for an apply operation.
+ //
+ // PlanOutBackend is the backend to store with the plan. This is the
+ // backend that will be used when applying the plan.
+ PlanId string
+ PlanRefresh bool // PlanRefresh will do a refresh before a plan
+ PlanOutPath string // PlanOutPath is the path to save the plan
+ PlanOutBackend *terraform.BackendState
+
+ // Module settings specify the root module to use for operations.
+ Module *module.Tree
+
+ // Plan is a plan that was passed as an argument. This is valid for
+ // plan and apply arguments but may not work for all backends.
+ Plan *terraform.Plan
+
+ // The options below are more self-explanatory and affect the runtime
+ // behavior of the operation.
+ Destroy bool
+ Targets []string
+ Variables map[string]interface{}
+ AutoApprove bool
+ DestroyForce bool
+
+ // Input/output/control options.
+ UIIn terraform.UIInput
+ UIOut terraform.UIOutput
+
+ // If LockState is true, the Operation must Lock any
+ // state.Lockers for its duration, and Unlock when complete.
+ LockState bool
+
+ // The duration to retry obtaining a State lock.
+ StateLockTimeout time.Duration
+
+ // Workspace is the name of the workspace that this operation should run
+ // in, which controls which named state is used.
+ Workspace string
+}
+
+// RunningOperation is the result of starting an operation.
+type RunningOperation struct {
+ // Context should be used to track Done and Err for errors.
+ //
+ // For implementers of a backend, this context should not wrap the
+ // passed in context. Otherwise, canceling the parent context will
+ // immediately mark this context as "done" but those aren't the semantics
+ // we want: we want this context to be done only when the operation itself
+ // is fully done.
+ context.Context
+
+ // Err is the error of the operation. This is populated after
+ // the operation has completed.
+ Err error
+
+ // PlanEmpty is populated after a Plan operation completes without error
+ // to note whether a plan is empty or has changes.
+ PlanEmpty bool
+
+ // State is the final state after the operation completed. Persisting
+ // this state is managed by the backend. This should only be read
+ // after the operation completes to avoid read/write races.
+ State *terraform.State
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/cli.go b/vendor/github.com/hashicorp/terraform/backend/cli.go
new file mode 100644
index 00000000000..40a66e69859
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/cli.go
@@ -0,0 +1,83 @@
+package backend
+
+import (
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/cli"
+ "github.com/mitchellh/colorstring"
+)
+
+// CLI is an optional interface that can be implemented to be initialized
+// with information from the Terraform CLI. If this is implemented, this
+// initialization function will be called with data to help interact better
+// with a CLI.
+//
+// This interface was created to improve backend interaction with the
+// official Terraform CLI while making it optional for API users to have
+// to provide full CLI interaction to every backend.
+//
+// If you're implementing a Backend, it is acceptable to require CLI
+// initialization. In this case, your backend should be coded to error
+// on other methods (such as State, Operation) if CLI initialization was not
+// done with all required fields.
+type CLI interface {
+ Backend
+
+ // CLIInit is called once with options. 
The options passed to this
+ // function may not be modified after calling this since they can be
+ // read/written at any time by the Backend implementation.
+ //
+ // This may be called before or after Configure is called, so if settings
+ // here affect configurable settings, care should be taken to handle
+ // whether they should be overwritten or not.
+ CLIInit(*CLIOpts) error
+}
+
+// CLIOpts are the options passed into CLIInit for the CLI interface.
+//
+// These options represent the functionality the CLI exposes and often
+// map to meta-flags available on every CLI (such as -input).
+//
+// When implementing a backend, it isn't expected that every option applies.
+// Your backend should be documented clearly to explain to end users what
+// options have an effect and what won't. In some cases, it may even make sense
+// to error in your backend when an option is set so that users don't make
+// a critically incorrect assumption about behavior.
+type CLIOpts struct {
+ // CLI and Colorize control the CLI output. If CLI is nil then no CLI
+ // output will be done. If CLIColor is nil then no coloring will be done.
+ CLI cli.Ui
+ CLIColor *colorstring.Colorize
+
+ // StatePath is the local path where state is read from.
+ //
+ // StateOutPath is the local path where the state will be written.
+ // If this is empty, it will default to StatePath.
+ //
+ // StateBackupPath is the local path where a backup file will be written.
+ // If this is empty, no backup will be taken.
+ StatePath string
+ StateOutPath string
+ StateBackupPath string
+
+ // ContextOpts are the base context options to set when initializing a
+ // Terraform context. Many of these will be overridden or merged by
+ // Operation. See Operation for more details.
+ ContextOpts *terraform.ContextOpts
+
+ // Input will ask for necessary input prior to performing any operations.
+ //
+ // Validation will perform validation prior to running an operation. The
+ // variable naming doesn't match the style of others since we have a func
+ // Validate.
+ Input bool
+ Validation bool
+
+ // RunningInAutomation indicates that commands are being run by an
+ // automated system rather than directly at a command prompt.
+ //
+ // This is a hint not to produce messages that expect that a user can
+ // run a follow-up command, perhaps because Terraform is running in
+ // some sort of workflow automation tool that abstracts away the
+ // exact commands that are being run.
+ RunningInAutomation bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/init/init.go b/vendor/github.com/hashicorp/terraform/backend/init/init.go
new file mode 100644
index 00000000000..fdc0ad263d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/init/init.go
@@ -0,0 +1,120 @@
+// Package init contains the list of backends that can be initialized and
+// basic helper functions for initializing those backends. 
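+//
+// Because the package is named init, callers must import it under an
+// alias (the alias name here is illustrative); a minimal lookup sketch:
+//
+//  import backendinit "github.com/hashicorp/terraform/backend/init"
+//
+//  if f := backendinit.Backend("local"); f != nil {
+//      b := f() // a fresh backend.Backend instance
+//      _ = b
+//  }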
+package init
+
+import (
+ "sync"
+
+ "github.com/hashicorp/terraform/backend"
+ "github.com/hashicorp/terraform/terraform"
+
+ backendatlas "github.com/hashicorp/terraform/backend/atlas"
+ backendlegacy "github.com/hashicorp/terraform/backend/legacy"
+ backendlocal "github.com/hashicorp/terraform/backend/local"
+ backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure"
+ backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul"
+ backendetcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3"
+ backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
+ backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
+ backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
+)
+
+// backends is the list of available backends. This is a global variable
+// because backends are currently hardcoded into Terraform and can't be
+// modified without recompilation.
+//
+// To read an available backend, use the Backend function. This ensures
+// safe concurrent read access to the list of built-in backends.
+//
+// Backends are hardcoded into Terraform because the API for backends uses
+// complex structures and supporting that over the plugin system is currently
+// prohibitively difficult. For those wanting to implement a custom backend,
+// they can do so with recompilation.
+var backends map[string]func() backend.Backend
+var backendsLock sync.Mutex
+
+func init() {
+ // Our hardcoded backends. We don't need to acquire a lock here
+ // since init() code is serial and can't spawn goroutines.
+ backends = map[string]func() backend.Backend{
+ "atlas": func() backend.Backend { return &backendatlas.Backend{} },
+ "local": func() backend.Backend { return &backendlocal.Local{} },
+ "consul": func() backend.Backend { return backendconsul.New() },
+ "inmem": func() backend.Backend { return backendinmem.New() },
+ "swift": func() backend.Backend { return backendSwift.New() },
+ "s3": func() backend.Backend { return backendS3.New() },
+ "azure": deprecateBackend(backendAzure.New(),
+ `Warning: "azure" name is deprecated, please use "azurerm"`),
+ "azurerm": func() backend.Backend { return backendAzure.New() },
+ "etcdv3": func() backend.Backend { return backendetcdv3.New() },
+ }
+
+ // Add the legacy remote backends that haven't yet been converted to
+ // the new backend API.
+ backendlegacy.Init(backends)
+}
+
+// Backend returns the initialization factory for the given backend, or
+// nil if none exists.
+func Backend(name string) func() backend.Backend {
+ backendsLock.Lock()
+ defer backendsLock.Unlock()
+ return backends[name]
+}
+
+// Set sets a new backend in the list of backends. If f is nil then the
+// backend will be removed from the map. If this backend already exists
+// then it will be overwritten.
+//
+// This method sets this backend globally and care should be taken to do
+// this only before Terraform is executing to prevent odd behavior of backends
+// changing mid-execution.
+func Set(name string, f func() backend.Backend) {
+ backendsLock.Lock()
+ defer backendsLock.Unlock()
+
+ if f == nil {
+ delete(backends, name)
+ return
+ }
+
+ backends[name] = f
+}
+
+// deprecatedBackendShim is used to wrap a backend and inject a deprecation
+// warning into the Validate method.
+type deprecatedBackendShim struct {
+ backend.Backend
+ Message string
+}
+
+// Validate the Backend then add the deprecation warning. 
+func (b deprecatedBackendShim) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+ warns, errs := b.Backend.Validate(c)
+ warns = append(warns, b.Message)
+ return warns, errs
+}
+
+// DeprecateBackend can be used to wrap a backend to return a deprecation
+// warning during validation.
+func deprecateBackend(b backend.Backend, message string) func() backend.Backend {
+ // Since a Backend wrapped by deprecatedBackendShim can no longer be
+ // asserted as an Enhanced or Local backend, disallow those types here
+ // entirely. If something other than a basic backend.Backend needs to be
+ // deprecated, we can add that functionality to schema.Backend or the
+ // backend itself.
+ if _, ok := b.(backend.Enhanced); ok {
+ panic("cannot use DeprecateBackend on an Enhanced Backend")
+ }
+
+ if _, ok := b.(backend.Local); ok {
+ panic("cannot use DeprecateBackend on a Local Backend")
+ }
+
+ return func() backend.Backend {
+ return deprecatedBackendShim{
+ Backend: b,
+ Message: message,
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/legacy/backend.go b/vendor/github.com/hashicorp/terraform/backend/legacy/backend.go
new file mode 100644
index 00000000000..a8b0cad9fb5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/legacy/backend.go
@@ -0,0 +1,75 @@
+package legacy
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/terraform/backend"
+ "github.com/hashicorp/terraform/state"
+ "github.com/hashicorp/terraform/state/remote"
+ "github.com/hashicorp/terraform/terraform"
+ "github.com/mitchellh/mapstructure"
+)
+
+// Backend is an implementation of backend.Backend for legacy remote state
+// clients.
+type Backend struct {
+ // Type is the type of remote state client to support
+ Type string
+
+ // client is set after Configure is called and client is initialized.
+ client remote.Client
+}
+
+func (b *Backend) Input(
+ ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+ // Return the config as-is, legacy doesn't support input
+ return c, nil
+}
+
+func (b *Backend) Validate(*terraform.ResourceConfig) ([]string, []error) {
+ // No validation was supported for old clients
+ return nil, nil
+}
+
+func (b *Backend) Configure(c *terraform.ResourceConfig) error {
+ // Legacy remote state was only map[string]string config
+ var conf map[string]string
+ if err := mapstructure.Decode(c.Raw, &conf); err != nil {
+ return fmt.Errorf(
+ "Failed to decode %q configuration: %s\n\n"+
+ "This backend expects all configuration keys and values to be\n"+
+ "strings. 
Please verify your configuration and try again.", + b.Type, err) + } + + client, err := remote.NewClient(b.Type, conf) + if err != nil { + return fmt.Errorf( + "Failed to configure remote backend %q: %s", + b.Type, err) + } + + // Set our client + b.client = client + return nil +} + +func (b *Backend) State(name string) (state.State, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrNamedStatesNotSupported + } + + if b.client == nil { + panic("State called with nil remote state client") + } + + return &remote.State{Client: b.client}, nil +} + +func (b *Backend) States() ([]string, error) { + return nil, backend.ErrNamedStatesNotSupported +} + +func (b *Backend) DeleteState(string) error { + return backend.ErrNamedStatesNotSupported +} diff --git a/vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go b/vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go new file mode 100644 index 00000000000..be3163bd53a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/legacy/legacy.go @@ -0,0 +1,28 @@ +// Package legacy contains a backend implementation that can be used +// with the legacy remote state clients. +package legacy + +import ( + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state/remote" +) + +// Init updates the backend/init package map of initializers to support +// all the remote state types. +// +// If a type is already in the map, it will not be added. This will allow +// us to slowly convert the legacy types to first-class backends. +func Init(m map[string]func() backend.Backend) { + for k, _ := range remote.BuiltinClients { + if _, ok := m[k]; !ok { + // Copy the "k" value since the variable "k" is reused for + // each key (address doesn't change). + typ := k + + // Build the factory function to return a backend of typ + m[k] = func() backend.Backend { + return &Backend{Type: typ} + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend.go b/vendor/github.com/hashicorp/terraform/backend/local/backend.go new file mode 100644 index 00000000000..3f6f6184787 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend.go @@ -0,0 +1,440 @@ +package local + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" +) + +const ( + DefaultWorkspaceDir = "terraform.tfstate.d" + DefaultWorkspaceFile = "environment" + DefaultStateFilename = "terraform.tfstate" + DefaultBackupExtension = ".backup" +) + +// Local is an implementation of EnhancedBackend that performs all operations +// locally. This is the "default" backend and implements normal Terraform +// behavior as it is well known. +type Local struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // The State* paths are set from the backend config, and may be left blank + // to use the defaults. If the actual paths for the local backend state are + // needed, use the StatePaths method. + // + // StatePath is the local path where state is read from. + // + // StateOutPath is the local path where the state will be written. 
+ // If this is empty, it will default to StatePath. + // + // StateBackupPath is the local path where a backup file will be written. + // Set this to "-" to disable state backup. + // + // StateWorkspaceDir is the path to the folder containing data for + // non-default workspaces. This defaults to DefaultWorkspaceDir if not set. + StatePath string + StateOutPath string + StateBackupPath string + StateWorkspaceDir string + + // We only want to create a single instance of a local state, so store them + // here as they're loaded. + states map[string]state.State + + // Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // OpInput will ask for necessary input prior to performing any operations. + // + // OpValidation will perform validation prior to running an operation. The + // variable naming doesn't match the style of others since we have a func + // Validate. + OpInput bool + OpValidation bool + + // Backend, if non-nil, will use this backend for non-enhanced behavior. + // This allows local behavior with remote state storage. It is a way to + // "upgrade" a non-enhanced backend to an enhanced backend with typical + // behavior. + // + // If this is nil, local performs normal state loading and storage. + Backend backend.Backend + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint not to produce messages that expect that a user can + // run a follow-up command, perhaps because Terraform is running in + // some sort of workflow automation tool that abstracts away the + // exact commands that are being run. + RunningInAutomation bool + + schema *schema.Backend + opLock sync.Mutex + once sync.Once +} + +func (b *Local) Input( + ui terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + b.once.Do(b.init) + + f := b.schema.Input + if b.Backend != nil { + f = b.Backend.Input + } + + return f(ui, c) +} + +func (b *Local) Validate(c *terraform.ResourceConfig) ([]string, []error) { + b.once.Do(b.init) + + f := b.schema.Validate + if b.Backend != nil { + f = b.Backend.Validate + } + + return f(c) +} + +func (b *Local) Configure(c *terraform.ResourceConfig) error { + b.once.Do(b.init) + + f := b.schema.Configure + if b.Backend != nil { + f = b.Backend.Configure + } + + return f(c) +} + +func (b *Local) States() ([]string, error) { + // If we have a backend handling state, defer to that. + if b.Backend != nil { + return b.Backend.States() + } + + // the listing always start with "default" + envs := []string{backend.DefaultStateName} + + entries, err := ioutil.ReadDir(b.stateWorkspaceDir()) + // no error if there's no envs configured + if os.IsNotExist(err) { + return envs, nil + } + if err != nil { + return nil, err + } + + var listed []string + for _, entry := range entries { + if entry.IsDir() { + listed = append(listed, filepath.Base(entry.Name())) + } + } + + sort.Strings(listed) + envs = append(envs, listed...) + + return envs, nil +} + +// DeleteState removes a named state. +// The "default" state cannot be removed. +func (b *Local) DeleteState(name string) error { + // If we have a backend handling state, defer to that. 
+ if b.Backend != nil {
+ return b.Backend.DeleteState(name)
+ }
+
+ if name == "" {
+ return errors.New("empty state name")
+ }
+
+ if name == backend.DefaultStateName {
+ return errors.New("cannot delete default state")
+ }
+
+ delete(b.states, name)
+ return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name))
+}
+
+func (b *Local) State(name string) (state.State, error) {
+ statePath, stateOutPath, backupPath := b.StatePaths(name)
+
+ // If we have a backend handling state, defer to that.
+ if b.Backend != nil {
+ s, err := b.Backend.State(name)
+ if err != nil {
+ return nil, err
+ }
+
+ // make sure we always have a backup state, unless it's disabled
+ if backupPath == "" {
+ return s, nil
+ }
+
+ // see if the delegated backend returned a BackupState of its own
+ if s, ok := s.(*state.BackupState); ok {
+ return s, nil
+ }
+
+ s = &state.BackupState{
+ Real: s,
+ Path: backupPath,
+ }
+ return s, nil
+ }
+
+ if s, ok := b.states[name]; ok {
+ return s, nil
+ }
+
+ if err := b.createState(name); err != nil {
+ return nil, err
+ }
+
+ // Otherwise, we need to load the state.
+ var s state.State = &state.LocalState{
+ Path: statePath,
+ PathOut: stateOutPath,
+ }
+
+ // If we are backing up the state, wrap it
+ if backupPath != "" {
+ s = &state.BackupState{
+ Real: s,
+ Path: backupPath,
+ }
+ }
+
+ if b.states == nil {
+ b.states = map[string]state.State{}
+ }
+ b.states[name] = s
+ return s, nil
+}
+
+// Operation implements backend.Enhanced
+//
+// This will initialize an in-memory terraform.Context to perform the
+// operation within this process.
+//
+// The given operation parameter will be merged with the ContextOpts on
+// the structure with the following rules. If a rule isn't specified and the
+// name conflicts, assume that the field is overwritten if set.
+func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) {
+ // Determine the function to call for our operation
+ var f func(context.Context, *backend.Operation, *backend.RunningOperation)
+ switch op.Type {
+ case backend.OperationTypeRefresh:
+ f = b.opRefresh
+ case backend.OperationTypePlan:
+ f = b.opPlan
+ case backend.OperationTypeApply:
+ f = b.opApply
+ default:
+ return nil, fmt.Errorf(
+ "Unsupported operation type: %s\n\n"+
+ "This is a bug in Terraform and should be reported. The local backend\n"+
+ "is built-in to Terraform and should always support all operations.",
+ op.Type)
+ }
+
+ // Lock
+ b.opLock.Lock()
+
+ // Build our running operation
+ runningCtx, runningCtxCancel := context.WithCancel(context.Background())
+ runningOp := &backend.RunningOperation{Context: runningCtx}
+
+ // Do it
+ go func() {
+ defer b.opLock.Unlock()
+ defer runningCtxCancel()
+ f(ctx, op, runningOp)
+ }()
+
+ // Return
+ return runningOp, nil
+}
+
+// Colorize returns the Colorize structure that can be used for colorizing
+// output. This is guaranteed to always return a non-nil value and so is useful
+// as a helper to wrap any potentially colored strings. 
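+// Typical use in this file: b.CLI.Output(b.Colorize().Color("[bold]text[reset]")).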
+func (b *Local) Colorize() *colorstring.Colorize { + if b.CLIColor != nil { + return b.CLIColor + } + + return &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } +} + +func (b *Local) init() { + b.schema = &schema.Backend{ + Schema: map[string]*schema.Schema{ + "path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + + "workspace_dir": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + + "environment_dir": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + ConflictsWith: []string{"workspace_dir"}, + + Deprecated: "workspace_dir should be used instead, with the same meaning", + }, + }, + + ConfigureFunc: b.schemaConfigure, + } +} + +func (b *Local) schemaConfigure(ctx context.Context) error { + d := schema.FromContextBackendConfig(ctx) + + // Set the path if it is set + pathRaw, ok := d.GetOk("path") + if ok { + path := pathRaw.(string) + if path == "" { + return fmt.Errorf("configured path is empty") + } + + b.StatePath = path + b.StateOutPath = path + } + + if raw, ok := d.GetOk("workspace_dir"); ok { + path := raw.(string) + if path != "" { + b.StateWorkspaceDir = path + } + } + + // Legacy name, which ConflictsWith workspace_dir + if raw, ok := d.GetOk("environment_dir"); ok { + path := raw.(string) + if path != "" { + b.StateWorkspaceDir = path + } + } + + return nil +} + +// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as +// configured from the CLI. +func (b *Local) StatePaths(name string) (string, string, string) { + statePath := b.StatePath + stateOutPath := b.StateOutPath + backupPath := b.StateBackupPath + + if name == "" { + name = backend.DefaultStateName + } + + if name == backend.DefaultStateName { + if statePath == "" { + statePath = DefaultStateFilename + } + } else { + statePath = filepath.Join(b.stateWorkspaceDir(), name, DefaultStateFilename) + } + + if stateOutPath == "" { + stateOutPath = statePath + } + + switch backupPath { + case "-": + backupPath = "" + case "": + backupPath = stateOutPath + DefaultBackupExtension + } + + return statePath, stateOutPath, backupPath +} + +// this only ensures that the named directory exists +func (b *Local) createState(name string) error { + if name == backend.DefaultStateName { + return nil + } + + stateDir := filepath.Join(b.stateWorkspaceDir(), name) + s, err := os.Stat(stateDir) + if err == nil && s.IsDir() { + // no need to check for os.IsNotExist, since that is covered by os.MkdirAll + // which will catch the other possible errors as well. + return nil + } + + err = os.MkdirAll(stateDir, 0755) + if err != nil { + return err + } + + return nil +} + +// stateWorkspaceDir returns the directory where state environments are stored. +func (b *Local) stateWorkspaceDir() string { + if b.StateWorkspaceDir != "" { + return b.StateWorkspaceDir + } + + return DefaultWorkspaceDir +} + +func (b *Local) pluginInitRequired(providerErr *terraform.ResourceProviderError) { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + strings.TrimSpace(errPluginInit)+"\n", + providerErr))) +} + +// this relies on multierror to format the plugin errors below the copy +const errPluginInit = ` +[reset][bold][yellow]Plugin reinitialization required. Please run "terraform init".[reset] +[yellow]Reason: Could not satisfy plugin requirements. + +Plugins are external binaries that Terraform uses to access and manipulate +resources. 
The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +[reset][red]%s + +[reset][yellow]Terraform automatically discovers provider requirements from your +configuration, including providers used in child modules. To see the +requirements and constraints from each module, run "terraform providers". +` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go new file mode 100644 index 00000000000..4463431815d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go @@ -0,0 +1,335 @@ +package local + +import ( + "bytes" + "context" + "errors" + "fmt" + "log" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +func (b *Local) opApply( + ctx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + log.Printf("[INFO] backend/local: starting Apply operation") + + // If we have a nil module at this point, then set it to an empty tree + // to avoid any potential crashes. + if op.Plan == nil && op.Module == nil && !op.Destroy { + runningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig)) + return + } + + // If we have a nil module at this point, then set it to an empty tree + // to avoid any potential crashes. + if op.Module == nil { + op.Module = module.NewEmptyTree() + } + + // Setup our count hook that keeps track of resource changes + countHook := new(CountHook) + stateHook := new(StateHook) + if b.ContextOpts == nil { + b.ContextOpts = new(terraform.ContextOpts) + } + old := b.ContextOpts.Hooks + defer func() { b.ContextOpts.Hooks = old }() + b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook) + + // Get our context + tfCtx, opState, err := b.context(op) + if err != nil { + runningOp.Err = err + return + } + + if op.LockState { + lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout) + defer cancel() + + lockInfo := state.NewLockInfo() + lockInfo.Operation = op.Type.String() + lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize()) + if err != nil { + runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err) + return + } + + defer func() { + if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil { + runningOp.Err = multierror.Append(runningOp.Err, err) + } + }() + } + + // Setup the state + runningOp.State = tfCtx.State() + + // If we weren't given a plan, then we refresh/plan + if op.Plan == nil { + // If we're refreshing before apply, perform that + if op.PlanRefresh { + log.Printf("[INFO] backend/local: apply calling Refresh") + _, err := tfCtx.Refresh() + if err != nil { + runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err) + return + } + } + + // Perform the plan + log.Printf("[INFO] backend/local: apply calling Plan") + plan, err := tfCtx.Plan() + if err != nil { + runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err) + return + } + + dispPlan := format.NewPlan(plan) + trivialPlan := dispPlan.Empty() + hasUI := op.UIOut != nil && op.UIIn != nil + mustConfirm := hasUI && ((op.Destroy && !op.DestroyForce) || (!op.Destroy && 
!op.AutoApprove && !trivialPlan)) + if mustConfirm { + var desc, query string + if op.Destroy { + // Default destroy message + desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + query = "Do you really want to destroy?" + } else { + desc = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + query = "Do you want to perform these actions?" + } + + if !trivialPlan { + // Display the plan of what we are going to apply/destroy. + b.renderPlan(dispPlan) + b.CLI.Output("") + } + + v, err := op.UIIn.Input(&terraform.InputOpts{ + Id: "approve", + Query: query, + Description: desc, + }) + if err != nil { + runningOp.Err = errwrap.Wrapf("Error asking for approval: {{err}}", err) + return + } + if v != "yes" { + if op.Destroy { + runningOp.Err = errors.New("Destroy cancelled.") + } else { + runningOp.Err = errors.New("Apply cancelled.") + } + return + } + } + } + + // Setup our hook for continuous state updates + stateHook.State = opState + + // Start the apply in a goroutine so that we can be interrupted. + var applyState *terraform.State + var applyErr error + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + _, applyErr = tfCtx.Apply() + // we always want the state, even if apply failed + applyState = tfCtx.State() + + /* + // Record any shadow errors for later + if err := ctx.ShadowError(); err != nil { + shadowErr = multierror.Append(shadowErr, multierror.Prefix( + err, "apply operation:")) + } + */ + }() + + // Wait for the apply to finish or for us to be interrupted so + // we can handle it properly. + err = nil + select { + case <-ctx.Done(): + if b.CLI != nil { + b.CLI.Output("stopping apply operation...") + } + + // try to force a PersistState just in case the process is terminated + // before we can complete. + if err := opState.PersistState(); err != nil { + // We can't error out from here, but warn the user if there was an error. + // If this isn't transient, we will catch it again below, and + // attempt to save the state another way. + if b.CLI != nil { + b.CLI.Error(fmt.Sprintf(earlyStateWriteErrorFmt, err)) + } + } + + // Stop execution + go tfCtx.Stop() + + // Wait for completion still + <-doneCh + case <-doneCh: + } + + // Store the final state + runningOp.State = applyState + + // Persist the state + if err := opState.WriteState(applyState); err != nil { + runningOp.Err = b.backupStateForError(applyState, err) + return + } + if err := opState.PersistState(); err != nil { + runningOp.Err = b.backupStateForError(applyState, err) + return + } + + if applyErr != nil { + runningOp.Err = fmt.Errorf( + "Error applying plan:\n\n"+ + "%s\n\n"+ + "Terraform does not automatically rollback in the face of errors.\n"+ + "Instead, your Terraform state file has been partially updated with\n"+ + "any resources that successfully completed. Please address the error\n"+ + "above and apply again to incrementally change your infrastructure.", + multierror.Flatten(applyErr)) + return + } + + // If we have a UI, output the results + if b.CLI != nil { + if op.Destroy { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset][bold][green]\n"+ + "Destroy complete! Resources: %d destroyed.", + countHook.Removed))) + } else { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset][bold][green]\n"+ + "Apply complete! 
Resources: %d added, %d changed, %d destroyed.", + countHook.Added, + countHook.Changed, + countHook.Removed))) + } + + // only show the state file help message if the state is local. + if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset]\n"+ + "The state of your infrastructure has been saved to the path\n"+ + "below. This state is required to modify and destroy your\n"+ + "infrastructure, so keep it safe. To inspect the complete state\n"+ + "use the `terraform show` command.\n\n"+ + "State path: %s", + b.StateOutPath))) + } + } +} + +// backupStateForError is called in a scenario where we're unable to persist the +// state for some reason, and will attempt to save a backup copy of the state +// to local disk to help the user recover. This is a "last ditch effort" sort +// of thing, so we really don't want to end up in this codepath; we should do +// everything we possibly can to get the state saved _somewhere_. +func (b *Local) backupStateForError(applyState *terraform.State, err error) error { + b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err)) + + local := &state.LocalState{Path: "errored.tfstate"} + writeErr := local.WriteState(applyState) + if writeErr != nil { + b.CLI.Error(fmt.Sprintf( + "Also failed to create local state file for recovery: %s\n\n", writeErr, + )) + // To avoid leaving the user with no state at all, our last resort + // is to print the JSON state out onto the terminal. This is an awful + // UX, so we should definitely avoid doing this if at all possible, + // but at least the user has _some_ path to recover if we end up + // here for some reason. + stateBuf := new(bytes.Buffer) + jsonErr := terraform.WriteState(applyState, stateBuf) + if jsonErr != nil { + b.CLI.Error(fmt.Sprintf( + "Also failed to JSON-serialize the state to print it: %s\n\n", jsonErr, + )) + return errors.New(stateWriteFatalError) + } + + b.CLI.Output(stateBuf.String()) + + return errors.New(stateWriteConsoleFallbackError) + } + + return errors.New(stateWriteBackedUpError) +} + +const applyErrNoConfig = ` +No configuration files found! + +Apply requires configuration to be present. Applying without a configuration +would mark everything for destruction, which is normally not what is desired. +If you would like to destroy everything, please run 'terraform destroy' instead +which does not require any configuration files. +` + +const stateWriteBackedUpError = `Failed to persist state to backend. + +The error shown above has prevented Terraform from writing the updated state +to the configured backend. To allow for recovery, the state has been written +to the file "errored.tfstate" in the current working directory. + +Running "terraform apply" again at this point will create a forked state, +making it harder to recover. + +To retry writing this state, use the following command: + terraform state push errored.tfstate +` + +const stateWriteConsoleFallbackError = `Failed to persist state to backend. + +The errors shown above prevented Terraform from writing the updated state to +the configured backend and from creating a local backup file. As a fallback, +the raw state data is printed above as a JSON object. + +To retry writing this state, copy the state data (from the first { to the +last } inclusive) and save it into a local file called errored.tfstate, then +run the following command: + terraform state push errored.tfstate +` + +const stateWriteFatalError = `Failed to save state after apply. 
+ +A catastrophic error has prevented Terraform from persisting the state file +or creating a backup. Unfortunately this means that the record of any resources +created during this apply has been lost, and such resources may exist outside +of Terraform's management. + +For resources that support import, it is possible to recover by manually +importing each resource using its id from the target system. + +This is a serious bug in Terraform and should be reported. +` + +const earlyStateWriteErrorFmt = `Error saving current state: %s + +Terraform encountered an error attempting to save the state before canceling +the current operation. Once the operation is complete another attempt will be +made to save the final state. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go new file mode 100644 index 00000000000..2c121d2e6d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go @@ -0,0 +1,128 @@ +package local + +import ( + "errors" + "fmt" + "log" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// backend.Local implementation. +func (b *Local) Context(op *backend.Operation) (*terraform.Context, state.State, error) { + // Make sure the type is invalid. We use this as a way to know not + // to ask for input/validate. + op.Type = backend.OperationTypeInvalid + + return b.context(op) +} + +func (b *Local) context(op *backend.Operation) (*terraform.Context, state.State, error) { + // Get the state. + s, err := b.State(op.Workspace) + if err != nil { + return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err) + } + + if err := s.RefreshState(); err != nil { + return nil, nil, errwrap.Wrapf("Error loading state: {{err}}", err) + } + + // Initialize our context options + var opts terraform.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.Destroy = op.Destroy + opts.Module = op.Module + opts.Targets = op.Targets + opts.UIInput = op.UIIn + if op.Variables != nil { + opts.Variables = op.Variables + } + + // Load our state + // By the time we get here, the backend creation code in "command" took + // care of making s.State() return a state compatible with our plan, + // if any, so we can safely pass this value in both the plan context + // and new context cases below. + opts.State = s.State() + + // Build the context + var tfCtx *terraform.Context + if op.Plan != nil { + tfCtx, err = op.Plan.Context(&opts) + } else { + tfCtx, err = terraform.NewContext(&opts) + } + + // any errors resolving plugins returns this + if rpe, ok := err.(*terraform.ResourceProviderError); ok { + b.pluginInitRequired(rpe) + // we wrote the full UI error here, so return a generic error for flow + // control in the command. + return nil, nil, errors.New("error satisfying plugin requirements") + } + + if err != nil { + return nil, nil, err + } + + // If we have an operation, then we automatically do the input/validate + // here since every option requires this. 
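+ // (Context, above, sets op.Type to OperationTypeInvalid precisely so a
+ // bare context request skips this input/validate pass.)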
+ if op.Type != backend.OperationTypeInvalid { + // If input asking is enabled, then do that + if op.Plan == nil && b.OpInput { + mode := terraform.InputModeProvider + mode |= terraform.InputModeVar + mode |= terraform.InputModeVarUnset + + if err := tfCtx.Input(mode); err != nil { + return nil, nil, errwrap.Wrapf("Error asking for user input: {{err}}", err) + } + } + + // If validation is enabled, validate + if b.OpValidation { + // We ignore warnings here on purpose. We expect users to be listening + // to the terraform.Hook called after a validation. + ws, es := tfCtx.Validate() + if len(ws) > 0 { + // Log just in case the CLI isn't enabled + log.Printf("[WARN] backend/local: %d warnings: %v", len(ws), ws) + + // If we have a CLI, output the warnings + if b.CLI != nil { + b.CLI.Warn(strings.TrimSpace(validateWarnHeader) + "\n") + for _, w := range ws { + b.CLI.Warn(fmt.Sprintf(" * %s", w)) + } + + // Make a newline before continuing + b.CLI.Output("") + } + } + + if len(es) > 0 { + return nil, nil, multierror.Append(nil, es...) + } + } + } + + return tfCtx, s, nil +} + +const validateWarnHeader = ` +There are warnings related to your configuration. If no errors occurred, +Terraform will continue despite these warnings. It is a good idea to resolve +these warnings in the near future. + +Warnings: +` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go new file mode 100644 index 00000000000..a4e92c1c711 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go @@ -0,0 +1,244 @@ +package local + +import ( + "bytes" + "context" + "fmt" + "log" + "os" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +func (b *Local) opPlan( + ctx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + log.Printf("[INFO] backend/local: starting Plan operation") + + if b.CLI != nil && op.Plan != nil { + b.CLI.Output(b.Colorize().Color( + "[reset][bold][yellow]" + + "The plan command received a saved plan file as input. This command\n" + + "will output the saved plan. This will not modify the already-existing\n" + + "plan. If you wish to generate a new plan, please pass in a configuration\n" + + "directory as an argument.\n\n")) + } + + // A local plan requires either a plan or a module + if op.Plan == nil && op.Module == nil && !op.Destroy { + runningOp.Err = fmt.Errorf(strings.TrimSpace(planErrNoConfig)) + return + } + + // If we have a nil module at this point, then set it to an empty tree + // to avoid any potential crashes. 
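+ // (module.NewEmptyTree yields a module tree with no configuration, which
+ // is enough for the saved-plan and destroy paths permitted above.)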
+ if op.Module == nil { + op.Module = module.NewEmptyTree() + } + + // Setup our count hook that keeps track of resource changes + countHook := new(CountHook) + if b.ContextOpts == nil { + b.ContextOpts = new(terraform.ContextOpts) + } + old := b.ContextOpts.Hooks + defer func() { b.ContextOpts.Hooks = old }() + b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook) + + // Get our context + tfCtx, opState, err := b.context(op) + if err != nil { + runningOp.Err = err + return + } + + if op.LockState { + lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout) + defer cancel() + + lockInfo := state.NewLockInfo() + lockInfo.Operation = op.Type.String() + lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize()) + if err != nil { + runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err) + return + } + + defer func() { + if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil { + runningOp.Err = multierror.Append(runningOp.Err, err) + } + }() + } + + // Setup the state + runningOp.State = tfCtx.State() + + // If we're refreshing before plan, perform that + if op.PlanRefresh { + log.Printf("[INFO] backend/local: plan calling Refresh") + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planRefreshing) + "\n")) + } + + _, err := tfCtx.Refresh() + if err != nil { + runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err) + return + } + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + } + + // Perform the plan + log.Printf("[INFO] backend/local: plan calling Plan") + plan, err := tfCtx.Plan() + if err != nil { + runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err) + return + } + + // Record state + runningOp.PlanEmpty = plan.Diff.Empty() + + // Save the plan to disk + if path := op.PlanOutPath; path != "" { + // Write the backend if we have one + plan.Backend = op.PlanOutBackend + + // This works around a bug (#12871) which is no longer possible to + // trigger but will exist for already corrupted upgrades. + if plan.Backend != nil && plan.State != nil { + plan.State.Remote = nil + } + + log.Printf("[INFO] backend/local: writing plan output to: %s", path) + f, err := os.Create(path) + if err == nil { + err = terraform.WritePlan(plan, f) + } + f.Close() + if err != nil { + runningOp.Err = fmt.Errorf("Error writing plan file: %s", err) + return + } + } + + // Perform some output tasks if we have a CLI to output to. + if b.CLI != nil { + dispPlan := format.NewPlan(plan) + if dispPlan.Empty() { + b.CLI.Output("\n" + b.Colorize().Color(strings.TrimSpace(planNoChanges))) + return + } + + b.renderPlan(dispPlan) + + // Give the user some next-steps, unless we're running in an automation + // tool which is presumed to provide its own UI for further actions. 
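+ // (RunningInAutomation mirrors CLIOpts.RunningInAutomation in backend/cli.go.)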
+ if !b.RunningInAutomation { + + b.CLI.Output("\n------------------------------------------------------------------------") + + if path := op.PlanOutPath; path == "" { + b.CLI.Output(fmt.Sprintf( + "\n" + strings.TrimSpace(planHeaderNoOutput) + "\n", + )) + } else { + b.CLI.Output(fmt.Sprintf( + "\n"+strings.TrimSpace(planHeaderYesOutput)+"\n", + path, path, + )) + } + + } + } +} + +func (b *Local) renderPlan(dispPlan *format.Plan) { + + headerBuf := &bytes.Buffer{} + fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(planHeaderIntro)) + counts := dispPlan.ActionCounts() + if counts[terraform.DiffCreate] > 0 { + fmt.Fprintf(headerBuf, "%s create\n", format.DiffActionSymbol(terraform.DiffCreate)) + } + if counts[terraform.DiffUpdate] > 0 { + fmt.Fprintf(headerBuf, "%s update in-place\n", format.DiffActionSymbol(terraform.DiffUpdate)) + } + if counts[terraform.DiffDestroy] > 0 { + fmt.Fprintf(headerBuf, "%s destroy\n", format.DiffActionSymbol(terraform.DiffDestroy)) + } + if counts[terraform.DiffDestroyCreate] > 0 { + fmt.Fprintf(headerBuf, "%s destroy and then create replacement\n", format.DiffActionSymbol(terraform.DiffDestroyCreate)) + } + if counts[terraform.DiffRefresh] > 0 { + fmt.Fprintf(headerBuf, "%s read (data resources)\n", format.DiffActionSymbol(terraform.DiffRefresh)) + } + + b.CLI.Output(b.Colorize().Color(headerBuf.String())) + + b.CLI.Output("Terraform will perform the following actions:\n") + + b.CLI.Output(dispPlan.Format(b.Colorize())) + + stats := dispPlan.Stats() + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset][bold]Plan:[reset] "+ + "%d to add, %d to change, %d to destroy.", + stats.ToAdd, stats.ToChange, stats.ToDestroy, + ))) +} + +const planErrNoConfig = ` +No configuration files found! + +Plan requires configuration to be present. Planning without a configuration +would mark everything for destruction, which is normally not what is desired. +If you would like to destroy everything, please run plan with the "-destroy" +flag or create a single empty configuration file. Otherwise, please create +a Terraform configuration file in the path being executed and try again. +` + +const planHeaderIntro = ` +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: +` + +const planHeaderNoOutput = ` +Note: You didn't specify an "-out" parameter to save this plan, so Terraform +can't guarantee that exactly these actions will be performed if +"terraform apply" is subsequently run. +` + +const planHeaderYesOutput = ` +This plan was saved to: %s + +To perform exactly these actions, run the following command to apply: + terraform apply %q +` + +const planNoChanges = ` +[reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green] + +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. +` + +const planRefreshing = ` +[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset] +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go new file mode 100644 index 00000000000..282e63045af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go @@ -0,0 +1,106 @@ +package local + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/state" +) + +func (b *Local) opRefresh( + ctx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + // Check if our state exists if we're performing a refresh operation. We + // only do this if we're managing state with this backend. + if b.Backend == nil { + if _, err := os.Stat(b.StatePath); err != nil { + if os.IsNotExist(err) { + err = nil + } + + if err != nil { + runningOp.Err = fmt.Errorf( + "There was an error reading the Terraform state that is needed\n"+ + "for refreshing. The path and error are shown below.\n\n"+ + "Path: %s\n\nError: %s", + b.StatePath, err) + return + } + } + } + + // If we have no config module given to use, create an empty tree to + // avoid crashes when Terraform.Context is initialized. + if op.Module == nil { + op.Module = module.NewEmptyTree() + } + + // Get our context + tfCtx, opState, err := b.context(op) + if err != nil { + runningOp.Err = err + return + } + + if op.LockState { + lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout) + defer cancel() + + lockInfo := state.NewLockInfo() + lockInfo.Operation = op.Type.String() + lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize()) + if err != nil { + runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err) + return + } + + defer func() { + if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil { + runningOp.Err = multierror.Append(runningOp.Err, err) + } + }() + } + + // Set our state + runningOp.State = opState.State() + if runningOp.State.Empty() || !runningOp.State.HasResources() { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color( + strings.TrimSpace(refreshNoState) + "\n")) + } + } + + // Perform operation and write the resulting state to the running op + newState, err := tfCtx.Refresh() + runningOp.State = newState + if err != nil { + runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err) + return + } + + // Write and persist the state + if err := opState.WriteState(newState); err != nil { + runningOp.Err = errwrap.Wrapf("Error writing state: {{err}}", err) + return + } + if err := opState.PersistState(); err != nil { + runningOp.Err = errwrap.Wrapf("Error saving state: {{err}}", err) + return + } +} + +const refreshNoState = ` +[reset][bold][yellow]Empty or non-existent state file.[reset][yellow] + +Refresh will do nothing. Refresh does not error or return an erroneous +exit status because many automation scripts use refresh, plan, then apply +and may not have a state file yet for the first run. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/cli.go b/vendor/github.com/hashicorp/terraform/backend/local/cli.go new file mode 100644 index 00000000000..f9edfd44968 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/cli.go @@ -0,0 +1,24 @@ +package local + +import ( + "github.com/hashicorp/terraform/backend" +) + +// backend.CLI impl. +func (b *Local) CLIInit(opts *backend.CLIOpts) error { + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ContextOpts = opts.ContextOpts + b.OpInput = opts.Input + b.OpValidation = opts.Validation + b.RunningInAutomation = opts.RunningInAutomation + + // Only configure state paths if we didn't do so via the configure func. + if b.StatePath == "" { + b.StatePath = opts.StatePath + b.StateOutPath = opts.StateOutPath + b.StateBackupPath = opts.StateBackupPath + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go new file mode 100644 index 00000000000..92b2624a531 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT. + +package local + +import "fmt" + +const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove" + +var _countHookAction_index = [...]uint8{0, 18, 39, 60} + +func (i countHookAction) String() string { + if i >= countHookAction(len(_countHookAction_index)-1) { + return fmt.Sprintf("countHookAction(%d)", i) + } + return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go new file mode 100644 index 00000000000..4708159dc0c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go @@ -0,0 +1,115 @@ +package local + +import ( + "strings" + "sync" + + "github.com/hashicorp/terraform/terraform" +) + +// CountHook is a hook that counts the number of resources +// added, removed, changed during the course of an apply. 
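The generated counthookaction_string.go above avoids allocating one string per enum value by concatenating every name into a single string and slicing it with an index table. A small hand-written sketch of the same layout, using a hypothetical two-value enum rather than the generated code:

package main

import "fmt"

// Hypothetical enum laid out the way stringer does it: one concatenated
// name string plus an index array marking each name's boundaries.
type color byte

const (
    colorRed color = iota
    colorBlue
)

const _color_name = "colorRedcolorBlue"

var _color_index = [...]uint8{0, 8, 17}

func (i color) String() string {
    if i >= color(len(_color_index)-1) {
        return fmt.Sprintf("color(%d)", i) // out-of-range fallback
    }
    return _color_name[_color_index[i]:_color_index[i+1]]
}

func main() {
    fmt.Println(colorBlue) // colorBlue
    fmt.Println(color(7))  // color(7)
}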
+type CountHook struct { + Added int + Changed int + Removed int + + ToAdd int + ToChange int + ToRemove int + ToRemoveAndAdd int + + pending map[string]countHookAction + + sync.Mutex + terraform.NilHook +} + +func (h *CountHook) Reset() { + h.Lock() + defer h.Unlock() + + h.pending = nil + h.Added = 0 + h.Changed = 0 + h.Removed = 0 +} + +func (h *CountHook) PreApply( + n *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + if d.Empty() { + return terraform.HookActionContinue, nil + } + + if h.pending == nil { + h.pending = make(map[string]countHookAction) + } + + action := countHookActionChange + if d.GetDestroy() { + action = countHookActionRemove + } else if s.ID == "" { + action = countHookActionAdd + } + + h.pending[n.HumanId()] = action + + return terraform.HookActionContinue, nil +} + +func (h *CountHook) PostApply( + n *terraform.InstanceInfo, + s *terraform.InstanceState, + e error) (terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + if h.pending != nil { + if a, ok := h.pending[n.HumanId()]; ok { + delete(h.pending, n.HumanId()) + + if e == nil { + switch a { + case countHookActionAdd: + h.Added += 1 + case countHookActionChange: + h.Changed += 1 + case countHookActionRemove: + h.Removed += 1 + } + } + } + } + + return terraform.HookActionContinue, nil +} + +func (h *CountHook) PostDiff( + n *terraform.InstanceInfo, d *terraform.InstanceDiff) ( + terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + // We don't count anything for data sources + if strings.HasPrefix(n.Id, "data.") { + return terraform.HookActionContinue, nil + } + + switch d.ChangeType() { + case terraform.DiffDestroyCreate: + h.ToRemoveAndAdd += 1 + case terraform.DiffCreate: + h.ToAdd += 1 + case terraform.DiffDestroy: + h.ToRemove += 1 + case terraform.DiffUpdate: + h.ToChange += 1 + } + + return terraform.HookActionContinue, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go new file mode 100644 index 00000000000..9a28464c2fd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go @@ -0,0 +1,11 @@ +package local + +//go:generate stringer -type=countHookAction hook_count_action.go + +type countHookAction byte + +const ( + countHookActionAdd countHookAction = iota + countHookActionChange + countHookActionRemove +) diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go new file mode 100644 index 00000000000..5483c4344ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go @@ -0,0 +1,33 @@ +package local + +import ( + "sync" + + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// StateHook is a hook that continuously updates the state by calling +// WriteState on a state.State. 
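CountHook counts in two phases: PreApply records the intended action per resource in a pending map, and PostApply tallies it only if the apply returned no error, so failed applies are never counted. A condensed sketch of that two-phase tally, with simplified types in place of the real hook interface:

package main

import "fmt"

// Condensed two-phase counter: record an intended action before the
// apply, count it afterwards only when the apply succeeded.
type tally struct {
    added   int
    pending map[string]string
}

func (t *tally) pre(id, action string) {
    if t.pending == nil {
        t.pending = map[string]string{}
    }
    t.pending[id] = action
}

func (t *tally) post(id string, err error) {
    action, ok := t.pending[id]
    delete(t.pending, id)
    if ok && err == nil && action == "add" {
        t.added++
    }
}

func main() {
    var t tally
    t.pre("aws_instance.web", "add")
    t.post("aws_instance.web", nil)
    fmt.Println("added:", t.added) // 1
}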
+type StateHook struct { + terraform.NilHook + sync.Mutex + + State state.State +} + +func (h *StateHook) PostStateUpdate( + s *terraform.State) (terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + if h.State != nil { + // Write the new state + if err := h.State.WriteState(s); err != nil { + return terraform.HookActionHalt, err + } + } + + // Continue forth + return terraform.HookActionContinue, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/testing.go b/vendor/github.com/hashicorp/terraform/backend/local/testing.go new file mode 100644 index 00000000000..beb88508e11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/testing.go @@ -0,0 +1,100 @@ +package local + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// TestLocal returns a configured Local struct with temporary paths and +// in-memory ContextOpts. +// +// No operations will be called on the returned value, so you can still set +// public fields without any locks. +func TestLocal(t *testing.T) *Local { + tempDir := testTempDir(t) + return &Local{ + StatePath: filepath.Join(tempDir, "state.tfstate"), + StateOutPath: filepath.Join(tempDir, "state.tfstate"), + StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"), + StateWorkspaceDir: filepath.Join(tempDir, "state.tfstate.d"), + ContextOpts: &terraform.ContextOpts{}, + } +} + +// TestLocalProvider modifies the ContextOpts of the *Local parameter to +// have a provider with the given name. +func TestLocalProvider(t *testing.T, b *Local, name string) *terraform.MockResourceProvider { + // Build a mock resource provider for in-memory operations + p := new(terraform.MockResourceProvider) + p.DiffReturn = &terraform.InstanceDiff{} + p.RefreshFn = func( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + return s, nil + } + p.ResourcesReturn = []terraform.ResourceType{ + terraform.ResourceType{ + Name: "test_instance", + }, + } + + // Initialize the opts + if b.ContextOpts == nil { + b.ContextOpts = &terraform.ContextOpts{} + } + + // Setup our provider + b.ContextOpts.ProviderResolver = terraform.ResourceProviderResolverFixed( + map[string]terraform.ResourceProviderFactory{ + name: terraform.ResourceProviderFactoryFixed(p), + }, + ) + + return p +} + +// TestNewLocalSingle is a factory for creating a TestLocalSingleState. +// This function matches the signature required for backend/init. +func TestNewLocalSingle() backend.Backend { + return &TestLocalSingleState{} +} + +// TestLocalSingleState is a backend implementation that wraps Local +// and modifies it to only support single states (returns +// ErrNamedStatesNotSupported for multi-state operations). +// +// This isn't an actual use case; it is exported just to provide an +// easy way to test that behavior.
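TestLocalSingleState below relies on Go struct embedding: it inherits every method from the embedded Local and shadows only the multi-state ones. A generic sketch of that embed-and-override technique, using illustrative types rather than the backend package:

package main

import (
    "errors"
    "fmt"
)

// Generic embed-and-override: the outer type inherits everything from
// the embedded type and shadows only the methods it wants to restrict.
type base struct{}

func (base) States() ([]string, error) { return []string{"default"}, nil }
func (base) Ping() string              { return "ok" }

type singleOnly struct {
    base
}

var errNamedStates = errors.New("named states not supported")

func (singleOnly) States() ([]string, error) { return nil, errNamedStates }

func main() {
    s := singleOnly{}
    fmt.Println(s.Ping()) // inherited from base: ok
    _, err := s.States()  // overridden
    fmt.Println(err)      // named states not supported
}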
+type TestLocalSingleState struct { + Local +} + +func (b *TestLocalSingleState) State(name string) (state.State, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrNamedStatesNotSupported + } + + return b.Local.State(name) +} + +func (b *TestLocalSingleState) States() ([]string, error) { + return nil, backend.ErrNamedStatesNotSupported +} + +func (b *TestLocalSingleState) DeleteState(string) error { + return backend.ErrNamedStatesNotSupported +} + +func testTempDir(t *testing.T) string { + d, err := ioutil.TempDir("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + + return d +} diff --git a/vendor/github.com/hashicorp/terraform/backend/nil.go b/vendor/github.com/hashicorp/terraform/backend/nil.go new file mode 100644 index 00000000000..e2f11e91d15 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/nil.go @@ -0,0 +1,39 @@ +package backend + +import ( + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// Nil is a no-op implementation of Backend. +// +// This is useful to embed within another struct to implement all of the +// backend interface for testing. +type Nil struct{} + +func (Nil) Input( + ui terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + return c, nil +} + +func (Nil) Validate(*terraform.ResourceConfig) ([]string, []error) { + return nil, nil +} + +func (Nil) Configure(*terraform.ResourceConfig) error { + return nil +} + +func (Nil) State(string) (state.State, error) { + // We have to return a non-nil state to adhere to the interface + return &state.InmemState{}, nil +} + +func (Nil) DeleteState(string) error { + return nil +} + +func (Nil) States() ([]string, error) { + return []string{DefaultStateName}, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/operation_type.go b/vendor/github.com/hashicorp/terraform/backend/operation_type.go new file mode 100644 index 00000000000..1739dc7fc4f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/operation_type.go @@ -0,0 +1,14 @@ +package backend + +//go:generate stringer -type=OperationType operation_type.go + +// OperationType is an enum used with Operation to specify the operation +// type to perform for Terraform. +type OperationType uint + +const ( + OperationTypeInvalid OperationType = iota + OperationTypeRefresh + OperationTypePlan + OperationTypeApply +) diff --git a/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go new file mode 100644 index 00000000000..15fbba6ecce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT. 
+ +package backend + +import "fmt" + +const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply" + +var _OperationType_index = [...]uint8{0, 20, 40, 57, 75} + +func (i OperationType) String() string { + if i >= OperationType(len(_OperationType_index)-1) { + return fmt.Sprintf("OperationType(%d)", i) + } + return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go new file mode 100644 index 00000000000..5c7577b3033 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go @@ -0,0 +1,225 @@ +package azure + +import ( + "context" + "fmt" + + armStorage "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" +) + +// New creates a new backend for Azure remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "storage_account_name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the storage account.", + }, + + "container_name": { + Type: schema.TypeString, + Required: true, + Description: "The container name.", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The blob key.", + }, + + "environment": { + Type: schema.TypeString, + Optional: true, + Description: "The Azure cloud environment.", + Default: "", + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "The access key.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), + }, + + "resource_group_name": { + Type: schema.TypeString, + Optional: true, + Description: "The resource group name.", + }, + + "arm_subscription_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Subscription ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), + }, + + "arm_client_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Client ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), + }, + + "arm_client_secret": { + Type: schema.TypeString, + Optional: true, + Description: "The Client Secret.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), + }, + + "arm_tenant_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Tenant ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + blobClient storage.BlobStorageClient + + containerName string + keyName string + leaseID string +} + +type BackendConfig struct { + AccessKey string + Environment string + ClientID string + ClientSecret string + ResourceGroupName string + StorageAccountName string + SubscriptionID string + TenantID string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.containerName != "" { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + b.containerName = data.Get("container_name").(string) + b.keyName = data.Get("key").(string) + + config := 
BackendConfig{ + AccessKey: data.Get("access_key").(string), + ClientID: data.Get("arm_client_id").(string), + ClientSecret: data.Get("arm_client_secret").(string), + Environment: data.Get("environment").(string), + ResourceGroupName: data.Get("resource_group_name").(string), + StorageAccountName: data.Get("storage_account_name").(string), + SubscriptionID: data.Get("arm_subscription_id").(string), + TenantID: data.Get("arm_tenant_id").(string), + } + + blobClient, err := getBlobClient(config) + if err != nil { + return err + } + b.blobClient = blobClient + + return nil +} + +func getBlobClient(config BackendConfig) (storage.BlobStorageClient, error) { + var client storage.BlobStorageClient + + env, err := getAzureEnvironment(config.Environment) + if err != nil { + return client, err + } + + accessKey, err := getAccessKey(config, env) + if err != nil { + return client, err + } + + storageClient, err := storage.NewClient(config.StorageAccountName, accessKey, env.StorageEndpointSuffix, + storage.DefaultAPIVersion, true) + if err != nil { + return client, fmt.Errorf("Error creating storage client for storage account %q: %s", config.StorageAccountName, err) + } + + client = storageClient.GetBlobService() + return client, nil +} + +func getAccessKey(config BackendConfig, env azure.Environment) (string, error) { + if config.AccessKey != "" { + return config.AccessKey, nil + } + + rgOk := config.ResourceGroupName != "" + subOk := config.SubscriptionID != "" + clientIDOk := config.ClientID != "" + clientSecretOK := config.ClientSecret != "" + tenantIDOk := config.TenantID != "" + if !rgOk || !subOk || !clientIDOk || !clientSecretOK || !tenantIDOk { + return "", fmt.Errorf("resource_group_name and credentials must be provided when access_key is absent") + } + + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) + if err != nil { + return "", err + } + + spt, err := adal.NewServicePrincipalToken(*oauthConfig, config.ClientID, config.ClientSecret, env.ResourceManagerEndpoint) + if err != nil { + return "", err + } + + accountsClient := armStorage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, config.SubscriptionID) + accountsClient.Authorizer = autorest.NewBearerAuthorizer(spt) + + keys, err := accountsClient.ListKeys(config.ResourceGroupName, config.StorageAccountName) + if err != nil { + return "", fmt.Errorf("Error retrieving keys for storage account %q: %s", config.StorageAccountName, err) + } + + if keys.Keys == nil { + return "", fmt.Errorf("Nil key returned for storage account %q", config.StorageAccountName) + } + + accessKeys := *keys.Keys + return *accessKeys[0].Value, nil +} + +func getAzureEnvironment(environment string) (azure.Environment, error) { + if environment == "" { + return azure.PublicCloud, nil + } + + env, err := azure.EnvironmentFromName(environment) + if err != nil { + // try again with wrapped value to support readable values like german instead of AZUREGERMANCLOUD + var innerErr error + env, innerErr = azure.EnvironmentFromName(fmt.Sprintf("AZURE%sCLOUD", environment)) + if innerErr != nil { + return env, fmt.Errorf("invalid 'environment' configuration: %s", err) + } + } + + return env, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go new file mode 100644 index 00000000000..c7bc02755f0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go @@ -0,0 
+1,141 @@ +package azure + +import ( + "fmt" + "sort" + "strings" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + // This will be used as directory name, the odd looking colon is simply to + // reduce the chance of name conflicts with existing objects. + keyEnvPrefix = "env:" +) + +func (b *Backend) States() ([]string, error) { + prefix := b.keyName + keyEnvPrefix + params := storage.ListBlobsParameters{ + Prefix: prefix, + } + + container := b.blobClient.GetContainerReference(b.containerName) + resp, err := container.ListBlobs(params) + if err != nil { + return nil, err + } + + envs := map[string]struct{}{} + for _, obj := range resp.Blobs { + key := obj.Name + if strings.HasPrefix(key, prefix) { + name := strings.TrimPrefix(key, prefix) + // we store the state in a key, not a directory + if strings.Contains(name, "/") { + continue + } + + envs[name] = struct{}{} + } + } + + result := []string{backend.DefaultStateName} + for name := range envs { + result = append(result, name) + } + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteState(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + containerReference := b.blobClient.GetContainerReference(b.containerName) + blobReference := containerReference.GetBlobReference(b.path(name)) + options := &storage.DeleteBlobOptions{} + + return blobReference.Delete(options) +} + +func (b *Backend) State(name string) (state.State, error) { + client := &RemoteClient{ + blobClient: b.blobClient, + containerName: b.containerName, + keyName: b.path(name), + } + + stateMgr := &remote.State{Client: client} + + //if this isn't the default state name, we need to create the object so + //it's listed by States. + if name != backend.DefaultStateName { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock azure state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return b.keyName + keyEnvPrefix + name +} + +const errStateUnlock = ` +Error unlocking Azure state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go new file mode 100644 index 00000000000..52999579b81 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go @@ -0,0 +1,271 @@ +package azure + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/storage" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + leaseHeader = "x-ms-lease-id" + // Must be lower case + lockInfoMetaKey = "terraformlockid" +) + +type RemoteClient struct { + blobClient storage.BlobStorageClient + containerName string + keyName string + leaseID string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + options := &storage.GetBlobOptions{} + + if c.leaseID != "" { + options.LeaseID = c.leaseID + } + + blob, err := blobReference.Get(options) + if err != nil { + if storErr, ok := err.(storage.AzureStorageServiceError); ok { + if storErr.Code == "BlobNotFound" { + return nil, nil + } + } + return nil, err + } + + defer blob.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, blob); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + getOptions := &storage.GetBlobMetadataOptions{} + setOptions := &storage.SetBlobPropertiesOptions{} + putOptions := &storage.PutBlobOptions{} + + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + + blobReference.Properties.ContentType = "application/json" + blobReference.Properties.ContentLength = int64(len(data)) + + if c.leaseID != "" { + getOptions.LeaseID = c.leaseID + setOptions.LeaseID = c.leaseID + putOptions.LeaseID = c.leaseID + } + + exists, err := blobReference.Exists() + if err != nil { + return err + } + + if exists { + err = blobReference.GetMetadata(getOptions) + if err != nil { + return err + } + } + + reader := bytes.NewReader(data) + + err = blobReference.CreateBlockBlobFromReader(reader, putOptions) + if err != nil { + return err + } + + return blobReference.SetProperties(setOptions) +} + +func (c *RemoteClient) Delete() error { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + options := &storage.DeleteBlobOptions{} + + if c.leaseID != "" { + options.LeaseID = c.leaseID + } + + return blobReference.Delete(options) +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) + info.Path = stateName + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + getLockInfoErr := func(err error) error { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + return 
&state.LockError{ + Err: err, + Info: lockInfo, + } + } + + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + leaseID, err := blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{}) + if err != nil { + if storErr, ok := err.(storage.AzureStorageServiceError); ok && storErr.Code != "BlobNotFound" { + return "", getLockInfoErr(err) + } + + // failed to lock as there was no state blob, write empty state + stateMgr := &remote.State{Client: c} + + // ensure state is actually empty + if err := stateMgr.RefreshState(); err != nil { + return "", fmt.Errorf("Failed to refresh state before writing empty state for locking: %s", err) + } + + log.Print("[DEBUG] Could not lock as state blob did not exist, creating with empty state") + + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + return "", fmt.Errorf("Failed to write empty state for locking: %s", err) + } + if err := stateMgr.PersistState(); err != nil { + return "", fmt.Errorf("Failed to persist empty state for locking: %s", err) + } + } + + leaseID, err = blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{}) + if err != nil { + return "", getLockInfoErr(err) + } + } + + info.ID = leaseID + c.leaseID = leaseID + + if err := c.writeLockInfo(info); err != nil { + return "", err + } + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{}) + if err != nil { + return nil, err + } + + raw := blobReference.Metadata[lockInfoMetaKey] + if raw == "" { + return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey) + } + + data, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + + lockInfo := &state.LockInfo{} + err = json.Unmarshal(data, lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +// writes info to blob meta data, deletes metadata entry if info is nil +func (c *RemoteClient) writeLockInfo(info *state.LockInfo) error { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{ + LeaseID: c.leaseID, + }) + if err != nil { + return err + } + + if info == nil { + delete(blobReference.Metadata, lockInfoMetaKey) + } else { + value := base64.StdEncoding.EncodeToString(info.Marshal()) + blobReference.Metadata[lockInfoMetaKey] = value + } + + opts := &storage.SetBlobMetadataOptions{ + LeaseID: c.leaseID, + } + return blobReference.SetMetadata(opts) +} + +func (c *RemoteClient) Unlock(id string) error { + lockErr := &state.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + if err := c.writeLockInfo(nil); err != nil { + lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %s", err) + return lockErr + } + + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + err = 
blobReference.ReleaseLease(id, &storage.LeaseOptions{}) + if err != nil { + lockErr.Err = err + return lockErr + } + + c.leaseID = "" + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go new file mode 100644 index 00000000000..271a60b6341 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go @@ -0,0 +1,180 @@ +package consul + +import ( + "context" + "net" + "strings" + "time" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" +) + +// New creates a new backend for Consul remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Path to store state in Consul", + }, + + "access_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Access token for a Consul ACL", + Default: "", // To prevent input + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Address to the Consul Cluster", + Default: "", // To prevent input + }, + + "scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Scheme to communicate to Consul with", + Default: "", // To prevent input + }, + + "datacenter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Datacenter to communicate with", + Default: "", // To prevent input + }, + + "http_auth": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "HTTP Auth in the format of 'username:password'", + Default: "", // To prevent input + }, + + "gzip": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Compress the state data using gzip", + Default: false, + }, + + "lock": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Lock state access", + Default: true, + }, + + "ca_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CACERT", ""), + }, + + "cert_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_CERT", ""), + }, + + "key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded private key, required if cert_file is specified.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_KEY", ""), + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + client *consulapi.Client + configData *schema.ResourceData + lock bool +} + +func (b *Backend) configure(ctx context.Context) error { + // Grab the resource data + b.configData = schema.FromContextBackendConfig(ctx) + + // Store the lock information + b.lock = b.configData.Get("lock").(bool) + + data := b.configData + + // Configure the client + config := consulapi.DefaultConfig() + + // replace the default Transport Dialer to reduce the KeepAlive + config.Transport.DialContext = dialContext + + if 
v, ok := data.GetOk("access_token"); ok && v.(string) != "" { + config.Token = v.(string) + } + if v, ok := data.GetOk("address"); ok && v.(string) != "" { + config.Address = v.(string) + } + if v, ok := data.GetOk("scheme"); ok && v.(string) != "" { + config.Scheme = v.(string) + } + if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" { + config.Datacenter = v.(string) + } + + if v, ok := data.GetOk("ca_file"); ok && v.(string) != "" { + config.TLSConfig.CAFile = v.(string) + } + if v, ok := data.GetOk("cert_file"); ok && v.(string) != "" { + config.TLSConfig.CertFile = v.(string) + } + if v, ok := data.GetOk("key_file"); ok && v.(string) != "" { + config.TLSConfig.KeyFile = v.(string) + } + + if v, ok := data.GetOk("http_auth"); ok && v.(string) != "" { + auth := v.(string) + + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &consulapi.HttpBasicAuth{ + Username: username, + Password: password, + } + } + + client, err := consulapi.NewClient(config) + if err != nil { + return err + } + + b.client = client + return nil +} + +// dialContext is the DialContext function for the consul client transport. +// This is stored in a package var to inject a different dialer for tests. +var dialContext = (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 17 * time.Second, +}).DialContext diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go new file mode 100644 index 00000000000..95010aa0e6a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go @@ -0,0 +1,155 @@ +package consul + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + keyEnvPrefix = "-env:" +) + +func (b *Backend) States() ([]string, error) { + // List our raw path + prefix := b.configData.Get("path").(string) + keyEnvPrefix + keys, _, err := b.client.KV().Keys(prefix, "/", nil) + if err != nil { + return nil, err + } + + // Find the envs, we use a map since we can get duplicates with + // path suffixes. + envs := map[string]struct{}{} + for _, key := range keys { + // Consul should ensure this but it doesn't hurt to check again + if strings.HasPrefix(key, prefix) { + key = strings.TrimPrefix(key, prefix) + + // Ignore anything with a "/" in it since we store the state + // directly in a key not a directory. + if idx := strings.IndexRune(key, '/'); idx >= 0 { + continue + } + + envs[key] = struct{}{} + } + } + + result := make([]string, 1, len(envs)+1) + result[0] = backend.DefaultStateName + for k, _ := range envs { + result = append(result, k) + } + + return result, nil +} + +func (b *Backend) DeleteState(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + // Determine the path of the data + path := b.path(name) + + // Delete it. We just delete it without any locking since + // the DeleteState API is documented as such. 
+ _, err := b.client.KV().Delete(path, nil) + return err +} + +func (b *Backend) State(name string) (state.State, error) { + // Determine the path of the data + path := b.path(name) + + // Determine whether to gzip or not + gzip := b.configData.Get("gzip").(bool) + + // Build the state client + var stateMgr state.State = &remote.State{ + Client: &RemoteClient{ + Client: b.client, + Path: path, + GZip: gzip, + lockState: b.lock, + }, + } + + // If we're not locking, disable it + if !b.lock { + stateMgr = &state.LockDisabled{Inner: stateMgr} + } + + // the default state always exists + if name == backend.DefaultStateName { + return stateMgr, nil + } + + // Grab a lock, we use this to write an empty state if one doesn't + // exist already. We have to write an empty state as a sentinel value + // so States() knows it exists. + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Consul: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + return stateMgr, nil +} + +func (b *Backend) path(name string) string { + path := b.configData.Get("path").(string) + if name != backend.DefaultStateName { + path += fmt.Sprintf("%s%s", keyEnvPrefix, name) + } + + return path +} + +const errStateUnlock = ` +Error unlocking Consul state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +The Consul backend acquires a lock during initialization to ensure +the minimum required key/values are prepared. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go new file mode 100644 index 00000000000..bd37712f3fe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go @@ -0,0 +1,468 @@ +package consul + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "encoding/json" + "errors" + "fmt" + "log" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +const ( + lockSuffix = "/.lock" + lockInfoSuffix = "/.lockinfo" + + // The Session TTL associated with this lock. + lockSessionTTL = "15s" + + // the delay time from when a session is lost to when the + // lock is released by the server + lockDelay = 5 * time.Second + // interval between attempts to reacquire a lost lock + lockReacquireInterval = 2 * time.Second +) + +var lostLockErr = errors.New("consul lock was lost") + +// RemoteClient is a remote client that stores data in Consul. 
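The consul backend's path helper above maps workspaces onto KV keys: the default state lives at the configured path, and every other workspace is stored under the path plus the "-env:" prefix plus its name. A small sketch of that key layout:

package main

import "fmt"

// Sketch of the workspace key layout used by the Consul backend above;
// statePath mirrors the backend's path() helper in simplified form.
const keyEnvPrefix = "-env:"

func statePath(base, name string) string {
    if name == "default" {
        return base
    }
    return base + keyEnvPrefix + name
}

func main() {
    fmt.Println(statePath("tf/state", "default")) // tf/state
    fmt.Println(statePath("tf/state", "staging")) // tf/state-env:staging
}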
+type RemoteClient struct { + Client *consulapi.Client + Path string + GZip bool + + mu sync.Mutex + // lockState is true if we're using locks + lockState bool + + // The index of the last state we wrote. + // If this is > 0, Put will perform a CAS to ensure that the state wasn't + // changed during the operation. This is important even with locks, because + // if the client loses the lock for some reason, then reacquires it, we + // need to make sure that the state was not modified. + modifyIndex uint64 + + consulLock *consulapi.Lock + lockCh <-chan struct{} + + info *state.LockInfo + + // cancel our goroutine which is monitoring the lock to automatically + // reacquire it when possible. + monitorCancel context.CancelFunc + monitorWG sync.WaitGroup + + // sessionCancel cancels the Context use for session.RenewPeriodic, and is + // called when unlocking, or before creating a new lock if the lock is + // lost. + sessionCancel context.CancelFunc +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + c.mu.Lock() + defer c.mu.Unlock() + + pair, _, err := c.Client.KV().Get(c.Path, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + c.modifyIndex = pair.ModifyIndex + + payload := pair.Value + // If the payload starts with 0x1f, it's gzip, not json + if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' { + if data, err := uncompressState(pair.Value); err == nil { + payload = data + } else { + return nil, err + } + } + + md5 := md5.Sum(pair.Value) + return &remote.Payload{ + Data: payload, + MD5: md5[:], + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + + payload := data + if c.GZip { + if compressedState, err := compressState(data); err == nil { + payload = compressedState + } else { + return err + } + } + + kv := c.Client.KV() + + // default to doing a CAS + verb := consulapi.KVCAS + + // Assume a 0 index doesn't need a CAS for now, since we are either + // creating a new state or purposely overwriting one. + if c.modifyIndex == 0 { + verb = consulapi.KVSet + } + + // KV.Put doesn't return the new index, so we use a single operation + // transaction to get the new index with a single request. 
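Get above distinguishes gzip-compressed payloads from plain JSON by sniffing the first byte: gzip streams always begin with the magic bytes 0x1f 0x8b. A self-contained sketch of that round trip (the helper name maybeUncompress is illustrative, not part of the backend):

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
)

// maybeUncompress mirrors the sniffing done in Get: a leading 0x1f byte
// marks a gzip payload, anything else is returned untouched.
func maybeUncompress(data []byte) ([]byte, error) {
    if len(data) == 0 || data[0] != '\x1f' {
        return data, nil // plain JSON state
    }
    zr, err := gzip.NewReader(bytes.NewReader(data))
    if err != nil {
        return nil, err
    }
    defer zr.Close()
    return ioutil.ReadAll(zr)
}

func main() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte(`{"version": 3}`))
    zw.Close()

    out, _ := maybeUncompress(buf.Bytes())
    fmt.Println(string(out)) // {"version": 3}
}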
+ txOps := consulapi.KVTxnOps{ + &consulapi.KVTxnOp{ + Verb: verb, + Key: c.Path, + Value: payload, + Index: c.modifyIndex, + }, + } + + ok, resp, _, err := kv.Txn(txOps, nil) + if err != nil { + return err + } + + // transaction was rolled back + if !ok { + return fmt.Errorf("consul CAS failed with transaction errors: %v", resp.Errors) + } + + if len(resp.Results) != 1 { + // this probably shouldn't happen + return fmt.Errorf("expected only 1 response value, got: %d", len(resp.Results)) + } + + c.modifyIndex = resp.Results[0].ModifyIndex + return nil +} + +func (c *RemoteClient) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + _, err := kv.Delete(c.Path, nil) + return err +} + +func (c *RemoteClient) putLockInfo(info *state.LockInfo) error { + info.Path = c.Path + info.Created = time.Now().UTC() + + kv := c.Client.KV() + _, err := kv.Put(&consulapi.KVPair{ + Key: c.Path + lockInfoSuffix, + Value: info.Marshal(), + }, nil) + + return err +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + path := c.Path + lockInfoSuffix + pair, _, err := c.Client.KV().Get(path, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + li := &state.LockInfo{} + err = json.Unmarshal(pair.Value, li) + if err != nil { + return nil, fmt.Errorf("error unmarshaling lock info: %s", err) + } + + return li, nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return "", nil + } + + c.info = info + + // These checks are only to ensure we strictly follow the specification. + // Terraform shouldn't ever re-lock, so provide errors for the 2 possible + // states if this is called. + select { + case <-c.lockCh: + // We had a lock, but lost it. + return "", errors.New("lost consul lock, cannot re-lock") + default: + if c.lockCh != nil { + // we have an active lock already + return "", fmt.Errorf("state %q already locked", c.Path) + } + } + + return c.lock() +} + +// the lock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) lock() (string, error) { + // We create a new session here, so it can be canceled when the lock is + // lost or unlocked. + lockSession, err := c.createSession() + if err != nil { + return "", err + } + + // store the session ID for correlation with consul logs + c.info.Info = "consul session: " + lockSession + + opts := &consulapi.LockOptions{ + Key: c.Path + lockSuffix, + Session: lockSession, + + // only wait briefly, so terraform has the choice to fail fast or + // retry as needed. + LockWaitTime: time.Second, + LockTryOnce: true, + + // Don't let the lock monitor give up right away, as it's possible the + // session is still OK. While the session is refreshed at a rate of + // TTL/2, the lock monitor is an idle blocking request and is more + // susceptible to being closed by a lower network layer. + MonitorRetries: 5, + // + // The delay between lock monitor retries. + // While the session has a 15s TTL plus a 5s wait period on a lost + // lock, if we can't get our lock back in 10+ seconds something is + // wrong so we're going to drop the session and start over. 
+ MonitorRetryTime: 2 * time.Second, + } + + c.consulLock, err = c.Client.LockOpts(opts) + if err != nil { + return "", err + } + + lockErr := &state.LockError{} + + lockCh, err := c.consulLock.Lock(make(chan struct{})) + if err != nil { + lockErr.Err = err + return "", lockErr + } + + if lockCh == nil { + lockInfo, e := c.getLockInfo() + if e != nil { + lockErr.Err = e + return "", lockErr + } + + lockErr.Info = lockInfo + + return "", lockErr + } + + c.lockCh = lockCh + + err = c.putLockInfo(c.info) + if err != nil { + if unlockErr := c.unlock(c.info.ID); unlockErr != nil { + err = multierror.Append(err, unlockErr) + } + + return "", err + } + + // Start a goroutine to monitor the lock state. + // If we lose the lock due to communication issues with the consul agent, + // attempt to immediately reacquire the lock. Put will verify the integrity + // of the state by using a CAS operation. + ctx, cancel := context.WithCancel(context.Background()) + c.monitorCancel = cancel + c.monitorWG.Add(1) + go func() { + defer c.monitorWG.Done() + select { + case <-c.lockCh: + log.Println("[ERROR] lost consul lock") + for { + c.mu.Lock() + // We lost our lock, so we need to cancel the session too. + // The CancelFunc is only replaced while holding Client.mu, so + // this is safe to call here. This will be replaced by the + // lock() call below. + c.sessionCancel() + + c.consulLock = nil + _, err := c.lock() + c.mu.Unlock() + + if err != nil { + // We failed to get the lock, keep trying as long as + // terraform is running. There may be changes in progress, + // so there's no use in aborting. Either we eventually + // reacquire the lock, or a Put will fail on a CAS. + log.Printf("[ERROR] could not reacquire lock: %s", err) + time.Sleep(lockReacquireInterval) + + select { + case <-ctx.Done(): + return + default: + } + continue + } + + // if the error was nil, the new lock started a new copy of + // this goroutine. + return + } + + case <-ctx.Done(): + return + } + }() + + if testLockHook != nil { + testLockHook() + } + + return c.info.ID, nil +} + +// called after a lock is acquired +var testLockHook func() + +func (c *RemoteClient) createSession() (string, error) { + // create the context first. Even if the session creation fails, we assume + // that the CancelFunc is always callable. + ctx, cancel := context.WithCancel(context.Background()) + c.sessionCancel = cancel + + session := c.Client.Session() + se := &consulapi.SessionEntry{ + Name: consulapi.DefaultLockSessionName, + TTL: lockSessionTTL, + LockDelay: lockDelay, + } + + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + + log.Println("[INFO] created consul lock session", id) + + // keep the session renewed + go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) + + return id, nil +} + +func (c *RemoteClient) Unlock(id string) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return nil + } + + return c.unlock(id) +} + +// the unlock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) unlock(id string) error { + // this doesn't use the lock id, because the lock is tied to the consul client. + if c.consulLock == nil || c.lockCh == nil { + return nil + } + + // cancel our monitoring goroutine + c.monitorCancel() + + defer func() { + c.consulLock = nil + + // The consul session is only used for this single lock, so cancel it + // after we unlock. + // The session is only created and replaced holding Client.mu, so the + // CancelFunc must be non-nil. 
+ c.sessionCancel() + }() + + select { + case <-c.lockCh: + return lostLockErr + default: + } + + kv := c.Client.KV() + + var errs error + + if _, err := kv.Delete(c.Path+lockInfoSuffix, nil); err != nil { + errs = multierror.Append(errs, err) + } + + if err := c.consulLock.Unlock(); err != nil { + errs = multierror.Append(errs, err) + } + + // the monitoring goroutine may be in a select on the lockCh, so we need to + // wait for it to return before changing the value. + c.monitorWG.Wait() + c.lockCh = nil + + // This is only cleanup, and will fail if the lock was immediately taken by + // another client, so we don't report an error to the user here. + c.consulLock.Destroy() + + return errs +} + +func compressState(data []byte) ([]byte, error) { + b := new(bytes.Buffer) + gz := gzip.NewWriter(b) + if _, err := gz.Write(data); err != nil { + return nil, err + } + if err := gz.Flush(); err != nil { + return nil, err + } + if err := gz.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func uncompressState(data []byte) ([]byte, error) { + b := new(bytes.Buffer) + gz, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, err + } + b.ReadFrom(gz) + if err := gz.Close(); err != nil { + return nil, err + } + return b.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go new file mode 100644 index 00000000000..fb3f5e20295 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go @@ -0,0 +1,157 @@ +package etcd + +import ( + "context" + + etcdv3 "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/pkg/transport" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" +) + +const ( + endpointsKey = "endpoints" + usernameKey = "username" + usernameEnvVarName = "ETCDV3_USERNAME" + passwordKey = "password" + passwordEnvVarName = "ETCDV3_PASSWORD" + prefixKey = "prefix" + lockKey = "lock" + cacertPathKey = "cacert_path" + certPathKey = "cert_path" + keyPathKey = "key_path" +) + +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + endpointsKey: &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + MinItems: 1, + Required: true, + Description: "Endpoints for the etcd cluster.", + }, + + usernameKey: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Username used to connect to the etcd cluster.", + DefaultFunc: schema.EnvDefaultFunc(usernameEnvVarName, ""), + }, + + passwordKey: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Password used to connect to the etcd cluster.", + DefaultFunc: schema.EnvDefaultFunc(passwordEnvVarName, ""), + }, + + prefixKey: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "An optional prefix to be added to keys when storing state in etcd.", + Default: "", + }, + + lockKey: &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Whether to lock state access.", + Default: true, + }, + + cacertPathKey: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The path to a PEM-encoded CA bundle with which to verify certificates of TLS-enabled etcd servers.", + Default: "", + }, + + certPathKey: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The path to a PEM-encoded certificate to provide to 
etcd for secure client identification.", + Default: "", + }, + + keyPathKey: &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The path to a PEM-encoded key to provide to etcd for secure client identification.", + Default: "", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure. + client *etcdv3.Client + data *schema.ResourceData + lock bool + prefix string +} + +func (b *Backend) configure(ctx context.Context) error { + var err error + // Grab the resource data. + b.data = schema.FromContextBackendConfig(ctx) + // Store the lock information. + b.lock = b.data.Get(lockKey).(bool) + // Store the prefix information. + b.prefix = b.data.Get(prefixKey).(string) + // Initialize a client to test config. + b.client, err = b.rawClient() + // Return err, if any. + return err +} + +func (b *Backend) rawClient() (*etcdv3.Client, error) { + config := etcdv3.Config{} + tlsInfo := transport.TLSInfo{} + + if v, ok := b.data.GetOk(endpointsKey); ok { + config.Endpoints = retrieveEndpoints(v) + } + if v, ok := b.data.GetOk(usernameKey); ok && v.(string) != "" { + config.Username = v.(string) + } + if v, ok := b.data.GetOk(passwordKey); ok && v.(string) != "" { + config.Password = v.(string) + } + if v, ok := b.data.GetOk(cacertPathKey); ok && v.(string) != "" { + tlsInfo.TrustedCAFile = v.(string) + } + if v, ok := b.data.GetOk(certPathKey); ok && v.(string) != "" { + tlsInfo.CertFile = v.(string) + } + if v, ok := b.data.GetOk(keyPathKey); ok && v.(string) != "" { + tlsInfo.KeyFile = v.(string) + } + + if tlsCfg, err := tlsInfo.ClientConfig(); err != nil { + return nil, err + } else if !tlsInfo.Empty() { + config.TLS = tlsCfg // Assign TLS configuration only if it valid and non-empty. 
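+		// Illustrative sketch only (the file paths are hypothetical, not
+		// values from this backend): mutual TLS is configured by pointing the
+		// three *_path options at PEM files, which end up here roughly as
+		//
+		//	tlsInfo := transport.TLSInfo{
+		//		TrustedCAFile: "/etc/etcd/ca.pem",
+		//		CertFile:      "/etc/etcd/client.pem",
+		//		KeyFile:       "/etc/etcd/client-key.pem",
+		//	}
+		//	tlsCfg, err := tlsInfo.ClientConfig()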
+ } + + return etcdv3.New(config) +} + +func retrieveEndpoints(v interface{}) []string { + var endpoints []string + list := v.([]interface{}) + for _, ep := range list { + endpoints = append(endpoints, ep.(string)) + } + return endpoints +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go new file mode 100644 index 00000000000..4c9f78a9b51 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go @@ -0,0 +1,103 @@ +package etcd + +import ( + "context" + "fmt" + "sort" + "strings" + + etcdv3 "github.com/coreos/etcd/clientv3" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +func (b *Backend) States() ([]string, error) { + res, err := b.client.Get(context.TODO(), b.prefix, etcdv3.WithPrefix(), etcdv3.WithKeysOnly()) + if err != nil { + return nil, err + } + + result := make([]string, 1, len(res.Kvs)+1) + result[0] = backend.DefaultStateName + for _, kv := range res.Kvs { + result = append(result, strings.TrimPrefix(string(kv.Key), b.prefix)) + } + sort.Strings(result[1:]) + + return result, nil +} + +func (b *Backend) DeleteState(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("Can't delete default state.") + } + + key := b.determineKey(name) + + _, err := b.client.Delete(context.TODO(), key) + return err +} + +func (b *Backend) State(name string) (state.State, error) { + var stateMgr state.State = &remote.State{ + Client: &RemoteClient{ + Client: b.client, + DoLock: b.lock, + Key: b.determineKey(name), + }, + } + + if !b.lock { + stateMgr = &state.LockDisabled{Inner: stateMgr} + } + + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("Failed to lock state in etcd: %s.", err) + } + + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + if err := lockUnlock(nil); err != nil { + return nil, err + } + + return stateMgr, nil +} + +func (b *Backend) determineKey(name string) string { + return b.prefix + name +} + +const errStateUnlock = ` +Error unlocking etcd state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go new file mode 100644 index 00000000000..155e8d8c16c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go @@ -0,0 +1,211 @@ +package etcd + +import ( + "context" + "crypto/md5" + "encoding/json" + "fmt" + "sync" + "time" + + etcdv3 "github.com/coreos/etcd/clientv3" + etcdv3sync "github.com/coreos/etcd/clientv3/concurrency" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +const ( + lockAcquireTimeout = 2 * time.Second + lockInfoSuffix = ".lockinfo" +) + +// RemoteClient is a remote client that will store data in etcd. +type RemoteClient struct { + Client *etcdv3.Client + DoLock bool + Key string + + etcdMutex *etcdv3sync.Mutex + etcdSession *etcdv3sync.Session + info *state.LockInfo + mu sync.Mutex + modRevision int64 +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + c.mu.Lock() + defer c.mu.Unlock() + + res, err := c.Client.KV.Get(context.TODO(), c.Key) + if err != nil { + return nil, err + } + if res.Count == 0 { + return nil, nil + } + if res.Count >= 2 { + return nil, fmt.Errorf("Expected a single result but got %d.", res.Count) + } + + c.modRevision = res.Kvs[0].ModRevision + + payload := res.Kvs[0].Value + md5 := md5.Sum(payload) + + return &remote.Payload{ + Data: payload, + MD5: md5[:], + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + + res, err := etcdv3.NewKV(c.Client).Txn(context.TODO()).If( + etcdv3.Compare(etcdv3.ModRevision(c.Key), "=", c.modRevision), + ).Then( + etcdv3.OpPut(c.Key, string(data)), + etcdv3.OpGet(c.Key), + ).Commit() + + if err != nil { + return err + } + if !res.Succeeded { + return fmt.Errorf("The transaction did not succeed.") + } + if len(res.Responses) != 2 { + return fmt.Errorf("Expected two responses but got %d.", len(res.Responses)) + } + + c.modRevision = res.Responses[1].GetResponseRange().Kvs[0].ModRevision + return nil +} + +func (c *RemoteClient) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + + _, err := c.Client.KV.Delete(context.TODO(), c.Key) + return err +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.DoLock { + return "", nil + } + if c.etcdSession != nil { + return "", fmt.Errorf("state %q already locked", c.Key) + } + + c.info = info + return c.lock() +} + +func (c *RemoteClient) Unlock(id string) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.DoLock { + return nil + } + + return c.unlock(id) +} + +func (c *RemoteClient) deleteLockInfo(info *state.LockInfo) error { + res, err := c.Client.KV.Delete(context.TODO(), c.Key+lockInfoSuffix) + if err != nil { + return err + } + if res.Deleted == 0 { + return fmt.Errorf("No keys deleted for %s when deleting lock info.", c.Key+lockInfoSuffix) + } + return nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + res, err := c.Client.KV.Get(context.TODO(), c.Key+lockInfoSuffix) + if err != nil { + return nil, err + } + if res.Count == 0 { + return nil, nil + } + + li := &state.LockInfo{} + err = json.Unmarshal(res.Kvs[0].Value, li) + if err != nil { + return nil, fmt.Errorf("Error unmarshaling lock info: %s.", err) + } + + return li, nil +} + +func (c *RemoteClient) putLockInfo(info *state.LockInfo) error { + c.info.Path = c.etcdMutex.Key() 
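+	// The lock metadata (ID, operation, who, created time) is serialized as
+	// JSON by info.Marshal() and stored next to the state at
+	// c.Key+lockInfoSuffix, so a competing client can report who currently
+	// holds the lock.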
+	c.info.Created = time.Now().UTC()
+
+	_, err := c.Client.KV.Put(context.TODO(), c.Key+lockInfoSuffix, string(c.info.Marshal()))
+	return err
+}
+
+func (c *RemoteClient) lock() (string, error) {
+	session, err := etcdv3sync.NewSession(c.Client)
+	if err != nil {
+		return "", err
+	}
+
+	ctx, cancel := context.WithTimeout(context.TODO(), lockAcquireTimeout)
+	defer cancel()
+
+	mutex := etcdv3sync.NewMutex(session, c.Key)
+	if err1 := mutex.Lock(ctx); err1 != nil {
+		lockInfo, err2 := c.getLockInfo()
+		if err2 != nil {
+			return "", &state.LockError{Err: err2}
+		}
+		return "", &state.LockError{Info: lockInfo, Err: err1}
+	}
+
+	c.etcdMutex = mutex
+	c.etcdSession = session
+
+	err = c.putLockInfo(c.info)
+	if err != nil {
+		if unlockErr := c.unlock(c.info.ID); unlockErr != nil {
+			err = multierror.Append(err, unlockErr)
+		}
+		return "", err
+	}
+
+	return c.info.ID, nil
+}
+
+func (c *RemoteClient) unlock(id string) error {
+	if c.etcdMutex == nil {
+		return nil
+	}
+
+	var errs error
+
+	if err := c.deleteLockInfo(c.info); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+	if err := c.etcdMutex.Unlock(context.TODO()); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+	if err := c.etcdSession.Close(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	c.etcdMutex = nil
+	c.etcdSession = nil
+
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go
new file mode 100644
index 00000000000..5eab8d0c6be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go
@@ -0,0 +1,208 @@
+package inmem
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// we keep the states and locks in package-level variables, so that they can be
+// accessed from multiple instances of the backend. This better emulates
+// backend instances accessing a single remote data store.
+var (
+	states stateMap
+	locks lockMap
+)
+
+func init() {
+	Reset()
+}
+
+// Reset clears out all existing state and lock data.
+// This is used to initialize the package during init, as well as between
+// tests.
+func Reset() {
+	states = stateMap{
+		m: map[string]*remote.State{},
+	}
+
+	locks = lockMap{
+		m: map[string]*state.LockInfo{},
+	}
+}
+
+// New creates a new backend for Inmem remote state.
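+//
+// A minimal usage sketch (for illustration; error handling elided, and the
+// backend works even before explicit configuration because all data lives in
+// the package-level maps above):
+//
+//	b := New()
+//	s, _ := b.State(backend.DefaultStateName)
+//	_ = s.RefreshState()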
+func New() backend.Backend { + // Set the schema + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "lock_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "initializes the state in a locked configuration", + }, + }, + } + backend := &Backend{Backend: s} + backend.Backend.ConfigureFunc = backend.configure + return backend +} + +type Backend struct { + *schema.Backend +} + +func (b *Backend) configure(ctx context.Context) error { + states.Lock() + defer states.Unlock() + + defaultClient := &RemoteClient{ + Name: backend.DefaultStateName, + } + + states.m[backend.DefaultStateName] = &remote.State{ + Client: defaultClient, + } + + // set the default client lock info per the test config + data := schema.FromContextBackendConfig(ctx) + if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" { + info := state.NewLockInfo() + info.ID = v.(string) + info.Operation = "test" + info.Info = "test config" + + locks.lock(backend.DefaultStateName, info) + } + + return nil +} + +func (b *Backend) States() ([]string, error) { + states.Lock() + defer states.Unlock() + + var workspaces []string + + for s := range states.m { + workspaces = append(workspaces, s) + } + + sort.Strings(workspaces) + return workspaces, nil +} + +func (b *Backend) DeleteState(name string) error { + states.Lock() + defer states.Unlock() + + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + delete(states.m, name) + return nil +} + +func (b *Backend) State(name string) (state.State, error) { + states.Lock() + defer states.Unlock() + + s := states.m[name] + if s == nil { + s = &remote.State{ + Client: &RemoteClient{ + Name: name, + }, + } + states.m[name] = s + + // to most closely replicate other implementations, we are going to + // take a lock and create a new state if it doesn't exist. + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := s.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock inmem state: %s", err) + } + defer s.Unlock(lockID) + + // If we have no state, we have to create an empty state + if v := s.State(); v == nil { + if err := s.WriteState(terraform.NewState()); err != nil { + return nil, err + } + if err := s.PersistState(); err != nil { + return nil, err + } + } + } + + return s, nil +} + +type stateMap struct { + sync.Mutex + m map[string]*remote.State +} + +// Global level locks for inmem backends. 
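+// A hedged sketch of the check-and-set behaviour implemented below: a second
+// lock on the same name fails with a *state.LockError describing the holder,
+// and unlock requires the matching ID.
+//
+//	info := state.NewLockInfo()
+//	id, _ := locks.lock("default", info)
+//	_, err := locks.lock("default", state.NewLockInfo()) // err: "state locked"
+//	_ = locks.unlock("default", id)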
+type lockMap struct { + sync.Mutex + m map[string]*state.LockInfo +} + +func (l *lockMap) lock(name string, info *state.LockInfo) (string, error) { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + if lockInfo != nil { + lockErr := &state.LockError{ + Info: lockInfo, + } + + lockErr.Err = errors.New("state locked") + // make a copy of the lock info to avoid any testing shenanigans + *lockErr.Info = *lockInfo + return "", lockErr + } + + info.Created = time.Now().UTC() + l.m[name] = info + + return info.ID, nil +} + +func (l *lockMap) unlock(name, id string) error { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + + if lockInfo == nil { + return errors.New("state not locked") + } + + lockErr := &state.LockError{ + Info: &state.LockInfo{}, + } + + if id != lockInfo.ID { + lockErr.Err = errors.New("invalid lock id") + *lockErr.Info = *lockInfo + return lockErr + } + + delete(l.m, name) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go new file mode 100644 index 00000000000..51c8d7251b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go @@ -0,0 +1,47 @@ +package inmem + +import ( + "crypto/md5" + + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +// RemoteClient is a remote client that stores data in memory for testing. +type RemoteClient struct { + Data []byte + MD5 []byte + Name string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + if c.Data == nil { + return nil, nil + } + + return &remote.Payload{ + Data: c.Data, + MD5: c.MD5, + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + md5 := md5.Sum(data) + + c.Data = data + c.MD5 = md5[:] + return nil +} + +func (c *RemoteClient) Delete() error { + c.Data = nil + c.MD5 = nil + return nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + return locks.lock(c.Name, info) +} +func (c *RemoteClient) Unlock(id string) error { + return locks.unlock(c.Name, id) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go new file mode 100644 index 00000000000..a9018fd278b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go @@ -0,0 +1,259 @@ +package s3 + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + + terraformAWS "github.com/terraform-providers/terraform-provider-aws/aws" +) + +// New creates a new backend for S3 remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the S3 bucket", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The path to the state file inside the bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + // s3 will strip leading slashes from an object, so while this will + // technically be accepted by s3, it will break our workspace hierarchy. 
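+				// For example (illustrative keys): "path/to/terraform.tfstate"
+				// passes this check, while "/path/to/terraform.tfstate" is
+				// rejected below.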
+ if strings.HasPrefix(v.(string), "/") { + return nil, []error{fmt.Errorf("key must not start with '/'")} + } + return nil, nil + }, + }, + + "region": { + Type: schema.TypeString, + Required: true, + Description: "The region of the S3 bucket.", + DefaultFunc: schema.EnvDefaultFunc("AWS_DEFAULT_REGION", nil), + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the S3 API", + DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""), + }, + + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: false, + }, + + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Canned ACL to be applied to the state file", + Default: "", + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "AWS access key", + Default: "", + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Description: "AWS secret key", + Default: "", + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "The ARN of a KMS Key to use for encrypting the state", + Default: "", + }, + + "lock_table": { + Type: schema.TypeString, + Optional: true, + Description: "DynamoDB table for state locking", + Default: "", + Deprecated: "please use the dynamodb_table attribute", + }, + + "dynamodb_table": { + Type: schema.TypeString, + Optional: true, + Description: "DynamoDB table for state locking and consistency", + Default: "", + }, + + "profile": { + Type: schema.TypeString, + Optional: true, + Description: "AWS profile name", + Default: "", + }, + + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + Description: "Path to a shared credentials file", + Default: "", + }, + + "token": { + Type: schema.TypeString, + Optional: true, + Description: "MFA token", + Default: "", + }, + + "skip_credentials_validation": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip the credentials validation via STS API.", + Default: false, + }, + + "skip_get_ec2_platforms": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip getting the supported EC2 platforms.", + Default: false, + }, + + "skip_requesting_account_id": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip requesting the account ID.", + Default: false, + }, + + "skip_metadata_api_check": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip the AWS Metadata API check.", + Default: false, + }, + + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "The role to be assumed", + Default: "", + }, + + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + Default: "", + }, + + "external_id": { + Type: schema.TypeString, + Optional: true, + Description: "The external ID to use when assuming the role", + Default: "", + }, + + "assume_role_policy": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions applied when assuming a role.", + Default: "", + }, + + "workspace_key_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The prefix applied to the non-default state path inside the bucket", + Default: "env:", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + s3Client *s3.S3 + dynClient 
*dynamodb.DynamoDB
+
+	bucketName string
+	keyName string
+	serverSideEncryption bool
+	acl string
+	kmsKeyID string
+	ddbTable string
+	workspaceKeyPrefix string
+}
+
+func (b *Backend) configure(ctx context.Context) error {
+	if b.s3Client != nil {
+		return nil
+	}
+
+	// Grab the resource data
+	data := schema.FromContextBackendConfig(ctx)
+
+	b.bucketName = data.Get("bucket").(string)
+	b.keyName = data.Get("key").(string)
+	b.serverSideEncryption = data.Get("encrypt").(bool)
+	b.acl = data.Get("acl").(string)
+	b.kmsKeyID = data.Get("kms_key_id").(string)
+	b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string)
+
+	b.ddbTable = data.Get("dynamodb_table").(string)
+	if b.ddbTable == "" {
+		// try the deprecated field
+		b.ddbTable = data.Get("lock_table").(string)
+	}
+
+	cfg := &terraformAWS.Config{
+		AccessKey: data.Get("access_key").(string),
+		AssumeRoleARN: data.Get("role_arn").(string),
+		AssumeRoleExternalID: data.Get("external_id").(string),
+		AssumeRolePolicy: data.Get("assume_role_policy").(string),
+		AssumeRoleSessionName: data.Get("session_name").(string),
+		CredsFilename: data.Get("shared_credentials_file").(string),
+		Profile: data.Get("profile").(string),
+		Region: data.Get("region").(string),
+		S3Endpoint: data.Get("endpoint").(string),
+		SecretKey: data.Get("secret_key").(string),
+		Token: data.Get("token").(string),
+		SkipCredsValidation: data.Get("skip_credentials_validation").(bool),
+		SkipGetEC2Platforms: data.Get("skip_get_ec2_platforms").(bool),
+		SkipRequestingAccountId: data.Get("skip_requesting_account_id").(bool),
+		SkipMetadataApiCheck: data.Get("skip_metadata_api_check").(bool),
+	}
+
+	client, err := cfg.Client()
+	if err != nil {
+		return err
+	}
+
+	b.s3Client = client.(*terraformAWS.AWSClient).S3()
+	b.dynClient = client.(*terraformAWS.AWSClient).DynamoDB()
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go
new file mode 100644
index 00000000000..f38b199b043
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go
@@ -0,0 +1,189 @@
+package s3
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func (b *Backend) States() ([]string, error) {
+	params := &s3.ListObjectsInput{
+		Bucket: &b.bucketName,
+		Prefix: aws.String(b.workspaceKeyPrefix + "/"),
+	}
+
+	resp, err := b.s3Client.ListObjects(params)
+	if err != nil {
+		return nil, err
+	}
+
+	envs := []string{backend.DefaultStateName}
+	for _, obj := range resp.Contents {
+		env := b.keyEnv(*obj.Key)
+		if env != "" {
+			envs = append(envs, env)
+		}
+	}
+
+	sort.Strings(envs[1:])
+	return envs, nil
+}
+
+// extract the env name from the S3 key
+func (b *Backend) keyEnv(key string) string {
+	// we have 3 parts, the prefix, the env name, and the key name
+	parts := strings.SplitN(key, "/", 3)
+	if len(parts) < 3 {
+		// no env here
+		return ""
+	}
+
+	// shouldn't happen since we listed by prefix
+	if parts[0] != b.workspaceKeyPrefix {
+		return ""
+	}
+
+	// not our key, so don't include it in our listing
+	if parts[2] != b.keyName {
+		return ""
+	}
+
+	return parts[1]
+}
+
+func (b *Backend) DeleteState(name string) error {
+	if name == backend.DefaultStateName ||
name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + + return client.Delete() +} + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path(name), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + ddbTable: b.ddbTable, + } + + return client, nil +} + +func (b *Backend) State(name string) (state.State, error) { + client, err := b.remoteClient(name) + if err != nil { + return nil, err + } + + stateMgr := &remote.State{Client: client} + // Check to see if this state already exists. + // If we're trying to force-unlock a state, we can't take the lock before + // fetching the state. If the state doesn't exist, we have to assume this + // is a normal create operation, and take the lock at that point. + // + // If we need to force-unlock, but for some reason the state no longer + // exists, the user will have to use aws tools to manually fix the + // situation. + existing, err := b.States() + if err != nil { + return nil, err + } + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + + // We need to create the object so it's listed by States. + if !exists { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock s3 state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + // This is to ensure that no one beat us to writing a state between + // the `exists` check and taking the lock. + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(terraform.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return strings.Join([]string{b.workspaceKeyPrefix, name, b.keyName}, "/") +} + +const errStateUnlock = ` +Error unlocking S3 state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go new file mode 100644 index 00000000000..675544fdb02 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go @@ -0,0 +1,414 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +// Store the last saved serial in dynamo with this suffix for consistency checks. +const ( + stateIDSuffix = "-md5" + s3ErrCodeInternalError = "InternalError" +) + +type RemoteClient struct { + s3Client *s3.S3 + dynClient *dynamodb.DynamoDB + bucketName string + path string + serverSideEncryption bool + acl string + kmsKeyID string + ddbTable string +} + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. + consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. + for { + payload, err = c.get() + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. 
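+		// (So with a nil payload the digest below stays empty, which fails
+		// the bytes.Equal comparison against any recorded checksum and sends
+		// us into the retry loop.)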
+ var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(); err != nil { + log.Printf("[WARNING] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARNING] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying S3 RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + + return payload, err +} + +func (c *RemoteClient) get() (*remote.Payload, error) { + var output *s3.GetObjectOutput + var err error + + // we immediately retry on an internal error, as those are usually transient + maxRetries := 2 + for retryCount := 0; ; retryCount++ { + output, err = c.s3Client.GetObject(&s3.GetObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + }) + + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + switch awserr.Code() { + case s3.ErrCodeNoSuchKey: + return nil, nil + case s3ErrCodeInternalError: + if retryCount > maxRetries { + return nil, err + } + log.Println("[WARN] s3 internal error, retrying...") + continue + } + } + return nil, err + } + break + } + + defer output.Body.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + sum := md5.Sum(buf.Bytes()) + payload := &remote.Payload{ + Data: buf.Bytes(), + MD5: sum[:], + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + contentType := "application/json" + contentLength := int64(len(data)) + + // we immediately retry on an internal error, as those are usually transient + maxRetries := 2 + for retryCount := 0; ; retryCount++ { + i := &s3.PutObjectInput{ + ContentType: &contentType, + ContentLength: &contentLength, + Body: bytes.NewReader(data), + Bucket: &c.bucketName, + Key: &c.path, + } + + if c.serverSideEncryption { + if c.kmsKeyID != "" { + i.SSEKMSKeyId = &c.kmsKeyID + i.ServerSideEncryption = aws.String("aws:kms") + } else { + i.ServerSideEncryption = aws.String("AES256") + } + } + + if c.acl != "" { + i.ACL = aws.String(c.acl) + } + + log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) + + _, err := c.s3Client.PutObject(i) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == s3ErrCodeInternalError { + if retryCount > maxRetries { + return fmt.Errorf("failed to upload state: %s", err) + } + log.Println("[WARN] s3 internal error, retrying...") + continue + } + } + return fmt.Errorf("failed to upload state: %s", err) + } + break + } + + sum := md5.Sum(data) + if err := c.putMD5(sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
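+		// (The Digest item in DynamoDB is what Get checks the object's real
+		// MD5 against, so the state write and the digest write have to
+		// succeed together.)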
+		return fmt.Errorf("failed to store state MD5: %s", err)
+	}
+
+	return nil
+}
+
+func (c *RemoteClient) Delete() error {
+	_, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{
+		Bucket: &c.bucketName,
+		Key: &c.path,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if err := c.deleteMD5(); err != nil {
+		log.Printf("[WARN] error deleting state md5: %s", err)
+	}
+
+	return nil
+}
+
+func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
+	if c.ddbTable == "" {
+		return "", nil
+	}
+
+	info.Path = c.lockPath()
+
+	if info.ID == "" {
+		lockID, err := uuid.GenerateUUID()
+		if err != nil {
+			return "", err
+		}
+
+		info.ID = lockID
+	}
+
+	putParams := &dynamodb.PutItemInput{
+		Item: map[string]*dynamodb.AttributeValue{
+			"LockID": {S: aws.String(c.lockPath())},
+			"Info": {S: aws.String(string(info.Marshal()))},
+		},
+		TableName: aws.String(c.ddbTable),
+		ConditionExpression: aws.String("attribute_not_exists(LockID)"),
+	}
+	_, err := c.dynClient.PutItem(putParams)
+
+	if err != nil {
+		lockInfo, infoErr := c.getLockInfo()
+		if infoErr != nil {
+			err = multierror.Append(err, infoErr)
+		}
+
+		lockErr := &state.LockError{
+			Err: err,
+			Info: lockInfo,
+		}
+		return "", lockErr
+	}
+
+	return info.ID, nil
+}
+
+func (c *RemoteClient) getMD5() ([]byte, error) {
+	if c.ddbTable == "" {
+		return nil, nil
+	}
+
+	getParams := &dynamodb.GetItemInput{
+		Key: map[string]*dynamodb.AttributeValue{
+			"LockID": {S: aws.String(c.lockPath() + stateIDSuffix)},
+		},
+		ProjectionExpression: aws.String("LockID, Digest"),
+		TableName: aws.String(c.ddbTable),
+	}
+
+	resp, err := c.dynClient.GetItem(getParams)
+	if err != nil {
+		return nil, err
+	}
+
+	var val string
+	if v, ok := resp.Item["Digest"]; ok && v.S != nil {
+		val = *v.S
+	}
+
+	sum, err := hex.DecodeString(val)
+	if err != nil || len(sum) != md5.Size {
+		return nil, errors.New("invalid md5")
+	}
+
+	return sum, nil
+}
+
+// store the hash of the state so that clients can check for stale state files.
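+// The DynamoDB item is keyed on lockPath()+"-md5" with the sum hex-encoded in
+// the Digest attribute, e.g. (illustrative values only):
+//
+//	LockID: "my-bucket/env:/prod/terraform.tfstate-md5"
+//	Digest: "9e107d9d372bb6826bd81d3542a419d6"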
+func (c *RemoteClient) putMD5(sum []byte) error { + if c.ddbTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + "Digest": {S: aws.String(hex.EncodeToString(sum))}, + }, + TableName: aws.String(c.ddbTable), + } + _, err := c.dynClient.PutItem(putParams) + if err != nil { + log.Printf("[WARNING] failed to record state serial in dynamodb: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5() error { + if c.ddbTable == "" { + return nil + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + }, + TableName: aws.String(c.ddbTable), + } + if _, err := c.dynClient.DeleteItem(params); err != nil { + return err + } + return nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + getParams := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + }, + ProjectionExpression: aws.String("LockID, Info"), + TableName: aws.String(c.ddbTable), + } + + resp, err := c.dynClient.GetItem(getParams) + if err != nil { + return nil, err + } + + var infoData string + if v, ok := resp.Item["Info"]; ok && v.S != nil { + infoData = *v.S + } + + lockInfo := &state.LockInfo{} + err = json.Unmarshal([]byte(infoData), lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.ddbTable == "" { + return nil + } + + lockErr := &state.LockError{} + + // TODO: store the path and lock ID in separate fields, and have proper + // projection expression only delete the lock if both match, rather than + // checking the ID from the info field first. + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + }, + TableName: aws.String(c.ddbTable), + } + _, err = c.dynClient.DeleteItem(params) + + if err != nil { + lockErr.Err = err + return lockErr + } + return nil +} + +func (c *RemoteClient) lockPath() string { + return fmt.Sprintf("%s/%s", c.bucketName, c.path) +} + +const errBadChecksumFmt = `state data in S3 does not have the expected content. + +This may be caused by unusually long delays in S3 processing a previous state +update. Please wait for a minute or two and try again. 
If this problem
+persists, and neither S3 nor DynamoDB are experiencing an outage, you may need
+to manually verify the remote state and update the Digest value stored in the
+DynamoDB table to the following value: %x
+`
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go
new file mode 100644
index 00000000000..e1e4e95b298
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go
@@ -0,0 +1,325 @@
+package swift
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
+
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/schema"
+	tf_openstack "github.com/terraform-providers/terraform-provider-openstack/openstack"
+)
+
+// New creates a new backend for Swift remote state.
+func New() backend.Backend {
+	s := &schema.Backend{
+		Schema: map[string]*schema.Schema{
+			"auth_url": &schema.Schema{
+				Type: schema.TypeString,
+				Required: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil),
+				Description: descriptions["auth_url"],
+			},
+
+			"user_id": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""),
+				Description: descriptions["user_id"],
+			},
+
+			"user_name": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""),
+				Description: descriptions["user_name"],
+			},
+
+			"tenant_id": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_ID",
+					"OS_PROJECT_ID",
+				}, ""),
+				Description: descriptions["tenant_id"],
+			},
+
+			"tenant_name": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_NAME",
+					"OS_PROJECT_NAME",
+				}, ""),
+				Description: descriptions["tenant_name"],
+			},
+
+			"password": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				Sensitive: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""),
+				Description: descriptions["password"],
+			},
+
+			"token": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""),
+				Description: descriptions["token"],
+			},
+
+			"domain_id": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_USER_DOMAIN_ID",
+					"OS_PROJECT_DOMAIN_ID",
+					"OS_DOMAIN_ID",
+				}, ""),
+				Description: descriptions["domain_id"],
+			},
+
+			"domain_name": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_USER_DOMAIN_NAME",
+					"OS_PROJECT_DOMAIN_NAME",
+					"OS_DOMAIN_NAME",
+					"OS_DEFAULT_DOMAIN",
+				}, ""),
+				Description: descriptions["domain_name"],
+			},
+
+			"region_name": &schema.Schema{
+				Type: schema.TypeString,
+				Required: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+				Description: descriptions["region_name"],
+			},
+
+			"insecure": &schema.Schema{
+				Type: schema.TypeBool,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""),
+				Description: descriptions["insecure"],
+			},
+
+			"endpoint_type": &schema.Schema{
+				Type: schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""),
+				Description: descriptions["endpoint_type"],
+			},
+
+			"cacert_file": &schema.Schema{
+				Type: schema.TypeString,
Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""), + Description: descriptions["cacert_file"], + }, + + "cert": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""), + Description: descriptions["cert"], + }, + + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""), + Description: descriptions["key"], + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["path"], + Deprecated: "Use container instead", + ConflictsWith: []string{"container"}, + }, + + "container": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["container"], + }, + + "archive_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["archive_path"], + Deprecated: "Use archive_container instead", + ConflictsWith: []string{"archive_container"}, + }, + + "archive_container": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["archive_container"], + }, + + "expire_after": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["expire_after"], + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +var descriptions map[string]string + +func init() { + descriptions = map[string]string{ + "auth_url": "The Identity authentication URL.", + + "user_name": "Username to login with.", + + "user_id": "User ID to login with.", + + "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "password": "Password to login with.", + + "token": "Authentication token to use as an alternative to username/password.", + + "domain_id": "The ID of the Domain to scope to (Identity v3).", + + "domain_name": "The name of the Domain to scope to (Identity v3).", + + "region_name": "The name of the Region to use.", + + "insecure": "Trust self-signed certificates.", + + "cacert_file": "A Custom CA certificate.", + + "endpoint_type": "The catalog endpoint type to use.", + + "cert": "A client certificate to authenticate with.", + + "key": "A client private key to authenticate with.", + + "path": "Swift container path to use.", + + "container": "Swift container to create", + + "archive_path": "Swift container path to archive state to.", + + "archive_container": "Swift container to archive state to.", + + "expire_after": "Archive object expiry duration.", + } +} + +type Backend struct { + *schema.Backend + + // Fields below are set from configure + client *gophercloud.ServiceClient + archive bool + archiveContainer string + expireSecs int + container string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.client != nil { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + config := &tf_openstack.Config{ + CACertFile: data.Get("cacert_file").(string), + ClientCertFile: data.Get("cert").(string), + ClientKeyFile: data.Get("key").(string), + DomainID: data.Get("domain_id").(string), + DomainName: data.Get("domain_name").(string), + EndpointType: data.Get("endpoint_type").(string), + IdentityEndpoint: data.Get("auth_url").(string), + Insecure: data.Get("insecure").(bool), + Password: data.Get("password").(string), + Token: 
data.Get("token").(string), + TenantID: data.Get("tenant_id").(string), + TenantName: data.Get("tenant_name").(string), + Username: data.Get("user_name").(string), + UserID: data.Get("user_id").(string), + } + + if err := config.LoadAndValidate(); err != nil { + return err + } + + // Assign Container + b.container = data.Get("container").(string) + if b.container == "" { + // Check deprecated field + b.container = data.Get("path").(string) + } + + // Enable object archiving? + if archiveContainer, ok := data.GetOk("archive_container"); ok { + log.Printf("[DEBUG] Archive_container set, enabling object versioning") + b.archive = true + b.archiveContainer = archiveContainer.(string) + } else if archivePath, ok := data.GetOk("archive_path"); ok { + log.Printf("[DEBUG] Archive_path set, enabling object versioning") + b.archive = true + b.archiveContainer = archivePath.(string) + } + + // Enable object expiry? + if expireRaw, ok := data.GetOk("expire_after"); ok { + expire := expireRaw.(string) + log.Printf("[DEBUG] Requested that remote state expires after %s", expire) + + if strings.HasSuffix(expire, "d") { + log.Printf("[DEBUG] Got a days expire after duration. Converting to hours") + days, err := strconv.Atoi(expire[:len(expire)-1]) + if err != nil { + return fmt.Errorf("Error converting expire_after value %s to int: %s", expire, err) + } + + expire = fmt.Sprintf("%dh", days*24) + log.Printf("[DEBUG] Expire after %s hours", expire) + } + + expireDur, err := time.ParseDuration(expire) + if err != nil { + log.Printf("[DEBUG] Error parsing duration %s: %s", expire, err) + return fmt.Errorf("Error parsing expire_after duration '%s': %s", expire, err) + } + log.Printf("[DEBUG] Seconds duration = %d", int(expireDur.Seconds())) + b.expireSecs = int(expireDur.Seconds()) + } + + objClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{ + Region: data.Get("region_name").(string), + }) + if err != nil { + return err + } + + b.client = objClient + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go new file mode 100644 index 00000000000..b8ab9810742 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go @@ -0,0 +1,31 @@ +package swift + +import ( + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +func (b *Backend) States() ([]string, error) { + return nil, backend.ErrNamedStatesNotSupported +} + +func (b *Backend) DeleteState(name string) error { + return backend.ErrNamedStatesNotSupported +} + +func (b *Backend) State(name string) (state.State, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrNamedStatesNotSupported + } + + client := &RemoteClient{ + client: b.client, + container: b.container, + archive: b.archive, + archiveContainer: b.archiveContainer, + expireSecs: b.expireSecs, + } + + return &remote.State{Client: client}, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go new file mode 100644 index 00000000000..1f8bf464998 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go @@ -0,0 +1,115 @@ +package swift + +import ( + "bytes" + "crypto/md5" + "log" + "os" + + "github.com/gophercloud/gophercloud" + 
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + + "github.com/hashicorp/terraform/state/remote" +) + +const ( + TFSTATE_NAME = "tfstate.tf" + TFSTATE_LOCK_NAME = "tfstate.lock" +) + +// RemoteClient implements the Client interface for an Openstack Swift server. +type RemoteClient struct { + client *gophercloud.ServiceClient + container string + archive bool + archiveContainer string + expireSecs int +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + log.Printf("[DEBUG] Getting object %s in container %s", TFSTATE_NAME, c.container) + result := objects.Download(c.client, c.container, TFSTATE_NAME, nil) + + // Extract any errors from result + _, err := result.Extract() + + // 404 response is to be expected if the object doesn't already exist! + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Println("[DEBUG] Object doesn't exist to download.") + return nil, nil + } + + bytes, err := result.ExtractContent() + if err != nil { + return nil, err + } + + hash := md5.Sum(bytes) + payload := &remote.Payload{ + Data: bytes, + MD5: hash[:md5.Size], + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + if err := c.ensureContainerExists(); err != nil { + return err + } + + log.Printf("[DEBUG] Putting object %s in container %s", TFSTATE_NAME, c.container) + reader := bytes.NewReader(data) + createOpts := objects.CreateOpts{ + Content: reader, + } + + if c.expireSecs != 0 { + log.Printf("[DEBUG] ExpireSecs = %d", c.expireSecs) + createOpts.DeleteAfter = c.expireSecs + } + + result := objects.Create(c.client, c.container, TFSTATE_NAME, createOpts) + + return result.Err +} + +func (c *RemoteClient) Delete() error { + log.Printf("[DEBUG] Deleting object %s in container %s", TFSTATE_NAME, c.container) + result := objects.Delete(c.client, c.container, TFSTATE_NAME, nil) + return result.Err +} + +func (c *RemoteClient) ensureContainerExists() error { + containerOpts := &containers.CreateOpts{} + + if c.archive { + log.Printf("[DEBUG] Creating archive container %s", c.archiveContainer) + result := containers.Create(c.client, c.archiveContainer, nil) + if result.Err != nil { + log.Printf("[DEBUG] Error creating archive container %s: %s", c.archiveContainer, result.Err) + return result.Err + } + + log.Printf("[DEBUG] Enabling Versioning on container %s", c.container) + containerOpts.VersionsLocation = c.archiveContainer + } + + log.Printf("[DEBUG] Creating container %s", c.container) + result := containers.Create(c.client, c.container, containerOpts) + if result.Err != nil { + return result.Err + } + + return nil +} + +func multiEnv(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/terraform/backend/testing.go b/vendor/github.com/hashicorp/terraform/backend/testing.go new file mode 100644 index 00000000000..d7cc40ca0b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/testing.go @@ -0,0 +1,313 @@ +package backend + +import ( + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// TestBackendConfig validates and configures the backend with the +// given configuration. 
+func TestBackendConfig(t *testing.T, b Backend, c map[string]interface{}) Backend { + t.Helper() + + // Get the proper config structure + rc, err := config.NewRawConfig(c) + if err != nil { + t.Fatalf("bad: %s", err) + } + conf := terraform.NewResourceConfig(rc) + + // Validate + warns, errs := b.Validate(conf) + if len(warns) > 0 { + t.Fatalf("warnings: %s", warns) + } + if len(errs) > 0 { + t.Fatalf("errors: %s", errs) + } + + // Configure + if err := b.Configure(conf); err != nil { + t.Fatalf("err: %s", err) + } + + return b +} + +// TestBackend will test the functionality of a Backend. The backend is +// assumed to already be configured. This will test state functionality. +// If the backend reports it doesn't support multi-state by returning the +// error ErrNamedStatesNotSupported, then it will not test that. +// +// If you want to test locking, two backends must be given. If b2 is nil, +// then state locking won't be tested. +func TestBackend(t *testing.T, b1, b2 Backend) { + t.Helper() + + testBackendStates(t, b1) + + if b2 != nil { + testBackendStateLock(t, b1, b2) + } +} + +func testBackendStates(t *testing.T, b Backend) { + t.Helper() + + states, err := b.States() + if err == ErrNamedStatesNotSupported { + t.Logf("TestBackend: named states not supported in %T, skipping", b) + return + } + + // Test it starts with only the default + if len(states) != 1 || states[0] != DefaultStateName { + t.Fatalf("should only have default to start: %#v", states) + } + + // Create a couple states + foo, err := b.State("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasResources() { + t.Fatalf("should be empty: %s", v) + } + + bar, err := b.State("bar") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := bar.State(); v.HasResources() { + t.Fatalf("should be empty: %s", v) + } + + // Verify they are distinct states that can be read back from storage + { + // start with a fresh state, and record the lineage being + // written to "bar" + barState := terraform.NewState() + + // creating the named state may have created a lineage, so use that if it exists. + if s := bar.State(); s != nil && s.Lineage != "" { + barState.Lineage = s.Lineage + } + barLineage := barState.Lineage + + // the foo lineage should be distinct from bar, and unchanged after + // modifying bar + fooState := terraform.NewState() + // creating the named state may have created a lineage, so use that if it exists. 
+		if s := foo.State(); s != nil && s.Lineage != "" {
+			fooState.Lineage = s.Lineage
+		}
+		fooLineage := fooState.Lineage
+
+		// write a known state to foo
+		if err := foo.WriteState(fooState); err != nil {
+			t.Fatal("error writing foo state:", err)
+		}
+		if err := foo.PersistState(); err != nil {
+			t.Fatal("error persisting foo state:", err)
+		}
+
+		// write a distinct known state to bar
+		if err := bar.WriteState(barState); err != nil {
+			t.Fatalf("bad: %s", err)
+		}
+		if err := bar.PersistState(); err != nil {
+			t.Fatalf("bad: %s", err)
+		}
+
+		// verify that foo is unchanged with the existing state manager
+		if err := foo.RefreshState(); err != nil {
+			t.Fatal("error refreshing foo:", err)
+		}
+		fooState = foo.State()
+		switch {
+		case fooState == nil:
+			t.Fatal("nil state read from foo")
+		case fooState.Lineage == barLineage:
+			t.Fatalf("bar lineage read from foo: %#v", fooState)
+		case fooState.Lineage != fooLineage:
+			t.Fatal("foo lineage altered")
+		}
+
+		// fetch foo again from the backend
+		foo, err = b.State("foo")
+		if err != nil {
+			t.Fatal("error re-fetching state:", err)
+		}
+		if err := foo.RefreshState(); err != nil {
+			t.Fatal("error refreshing foo:", err)
+		}
+		fooState = foo.State()
+		switch {
+		case fooState == nil:
+			t.Fatal("nil state read from foo")
+		case fooState.Lineage != fooLineage:
+			t.Fatal("incorrect state returned from backend")
+		}
+
+		// fetch the bar again from the backend
+		bar, err = b.State("bar")
+		if err != nil {
+			t.Fatal("error re-fetching state:", err)
+		}
+		if err := bar.RefreshState(); err != nil {
+			t.Fatal("error refreshing bar:", err)
+		}
+		barState = bar.State()
+		switch {
+		case barState == nil:
+			t.Fatal("nil state read from bar")
+		case barState.Lineage != barLineage:
+			t.Fatal("incorrect state returned from backend")
+		}
+	}
+
+	// Verify we can now list them
+	{
+		// we determined that named states are supported earlier
+		states, err := b.States()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		sort.Strings(states)
+		expected := []string{"bar", "default", "foo"}
+		if !reflect.DeepEqual(states, expected) {
+			t.Fatalf("bad: %#v", states)
+		}
+	}
+
+	// Delete some states
+	if err := b.DeleteState("foo"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify the default state can't be deleted
+	if err := b.DeleteState(DefaultStateName); err == nil {
+		t.Fatal("expected error")
+	}
+
+	// Create and delete the foo state again.
+	// Make sure that there are no leftover artifacts from a deleted state
+	// preventing re-creation.
+	foo, err = b.State("foo")
+	if err != nil {
+		t.Fatalf("error: %s", err)
+	}
+	if err := foo.RefreshState(); err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+	if v := foo.State(); v.HasResources() {
+		t.Fatalf("should be empty: %s", v)
+	}
+	// and delete it again
+	if err := b.DeleteState("foo"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Verify deletion
+	{
+		states, err := b.States()
+		if err == ErrNamedStatesNotSupported {
+			t.Logf("TestBackend: named states not supported in %T, skipping", b)
+			return
+		}
+
+		sort.Strings(states)
+		expected := []string{"bar", "default"}
+		if !reflect.DeepEqual(states, expected) {
+			t.Fatalf("bad: %#v", states)
+		}
+	}
+}
+
+func testBackendStateLock(t *testing.T, b1, b2 Backend) {
+	t.Helper()
+
+	// Get the default state for each
+	b1StateMgr, err := b1.State(DefaultStateName)
+	if err != nil {
+		t.Fatalf("error: %s", err)
+	}
+	if err := b1StateMgr.RefreshState(); err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	// Fast exit if this doesn't support locking at all
+	if _, ok := b1StateMgr.(state.Locker); !ok {
+		t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1)
+		return
+	}
+
+	t.Logf("TestBackend: testing state locking for %T", b1)
+
+	b2StateMgr, err := b2.State(DefaultStateName)
+	if err != nil {
+		t.Fatalf("error: %s", err)
+	}
+	if err := b2StateMgr.RefreshState(); err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	// Reassign so it's obvious what's happening
+	lockerA := b1StateMgr.(state.Locker)
+	lockerB := b2StateMgr.(state.Locker)
+
+	infoA := state.NewLockInfo()
+	infoA.Operation = "test"
+	infoA.Who = "clientA"
+
+	infoB := state.NewLockInfo()
+	infoB.Operation = "test"
+	infoB.Who = "clientB"
+
+	lockIDA, err := lockerA.Lock(infoA)
+	if err != nil {
+		t.Fatal("unable to get initial lock:", err)
+	}
+
+	// If the lock ID is blank, assume locking is disabled
+	if lockIDA == "" {
+		t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1)
+		return
+	}
+
+	_, err = lockerB.Lock(infoB)
+	if err == nil {
+		lockerA.Unlock(lockIDA)
+		t.Fatal("client B obtained lock while held by client A")
+	}
+
+	if err := lockerA.Unlock(lockIDA); err != nil {
+		t.Fatal("error unlocking client A:", err)
+	}
+
+	lockIDB, err := lockerB.Lock(infoB)
+	if err != nil {
+		t.Fatal("unable to obtain lock from client B:", err)
+	}
+
+	if lockIDB == lockIDA {
+		t.Fatalf("duplicate lock IDs: %q", lockIDB)
+	}
+
+	if err = lockerB.Unlock(lockIDB); err != nil {
+		t.Fatal("error unlocking client B:", err)
+	}
+
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go
index a459bbe76b0..8546e04fceb 100644
--- a/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go
@@ -558,6 +558,25 @@ func (p *provisioner) configureVaultsFunc(gemCmd string, knifeCmd string, confDi
 		path.Join(confDir, p.UserName+".pem"),
 	)
 
+	// if the client gets recreated, remove the old client (with the old keys) from vaults/items;
+	// otherwise, the new client (with new keys) will not be able to decrypt the vault
+	if p.RecreateClient {
+		for vault, items := range p.Vaults {
+			for _, item := range items {
+				deleteCmd := fmt.Sprintf("%s vault remove %s %s -C \"%s\" -M client %s",
+					knifeCmd,
+					vault,
+					item,
+					p.NodeName,
+					options,
+				)
+				if err := p.runCommand(o, comm, deleteCmd); err != nil {
+					return err
+				}
+ } + } + } + for vault, items := range p.Vaults { for _, item := range items { updateCmd := fmt.Sprintf("%s vault update %s %s -C %s -M client %s", diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go index 731c85baf94..f537bb34ab5 100644 --- a/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go @@ -28,6 +28,12 @@ func Provisioner() terraform.ResourceProvisioner { Type: schema.TypeString, Required: true, }, + + "interpreter": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, }, ApplyFunc: applyFn, @@ -39,19 +45,29 @@ func applyFn(ctx context.Context) error { o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) command := data.Get("command").(string) + if command == "" { return fmt.Errorf("local-exec provisioner command must be a non-empty string") } // Execute the command using a shell - var shell, flag string - if runtime.GOOS == "windows" { - shell = "cmd" - flag = "/C" + interpreter := data.Get("interpreter").([]interface{}) + + var cmdargs []string + if len(interpreter) > 0 { + for _, i := range interpreter { + if arg, ok := i.(string); ok { + cmdargs = append(cmdargs, arg) + } + } } else { - shell = "/bin/sh" - flag = "-c" + if runtime.GOOS == "windows" { + cmdargs = []string{"cmd", "/C"} + } else { + cmdargs = []string{"/bin/sh", "-c"} + } } + cmdargs = append(cmdargs, command) // Setup the reader that will read the output from the command. // We use an os.Pipe so that the *os.File can be passed directly to the @@ -63,7 +79,7 @@ func applyFn(ctx context.Context) error { } // Setup the command - cmd := exec.CommandContext(ctx, shell, flag, command) + cmd := exec.CommandContext(ctx, cmdargs[0], cmdargs[1:]...) 
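// (Annotation, not part of the vendored diff: the net effect of the interpreter
// logic above is an argv made of the interpreter words followed by the whole
// command as one trailing argument. With hypothetical example values:
//
//	interpreter unset, on Linux:   ["/bin/sh", "-c", "echo hello"]
//	interpreter = ["perl", "-e"]:  ["perl", "-e", "print 'hi'"]
//
// The command string is never split by Terraform; parsing it is left entirely
// to the configured interpreter or the platform shell.)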
cmd.Stderr = pw cmd.Stdout = pw @@ -77,9 +93,7 @@ func applyFn(ctx context.Context) error { go copyOutput(o, tee, copyDoneCh) // Output what we're about to run - o.Output(fmt.Sprintf( - "Executing: %s %s \"%s\"", - shell, flag, command)) + o.Output(fmt.Sprintf("Executing: %q", cmdargs)) // Start the command err = cmd.Start() diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go index 7dd86daf040..ba811dafe51 100644 --- a/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go @@ -19,6 +19,10 @@ import ( "github.com/mitchellh/go-linereader" ) +// maxBackoffDelay is the maximum delay between retry attempts +var maxBackoffDelay = 10 * time.Second +var initialBackoffDelay = time.Second + func Provisioner() terraform.ResourceProvisioner { return &schema.Provisioner{ Schema: map[string]*schema.Schema{ @@ -246,7 +250,6 @@ func copyOutput( } // retryFunc is used to retry a function for a given duration -// TODO: this should probably backoff too func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error { // Build a new context with the timeout ctx, done := context.WithTimeout(ctx, timeout) @@ -263,12 +266,13 @@ func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error go func() { defer close(doneCh) + delay := time.Duration(0) for { // If our context ended, we want to exit right away. select { case <-ctx.Done(): return - default: + case <-time.After(delay): } // Try the function call @@ -279,7 +283,19 @@ func retryFunc(ctx context.Context, timeout time.Duration, f func() error) error return } - log.Printf("Retryable error: %v", err) + log.Printf("[WARN] retryable error: %v", err) + + delay *= 2 + + if delay == 0 { + delay = initialBackoffDelay + } + + if delay > maxBackoffDelay { + delay = maxBackoffDelay + } + + log.Printf("[INFO] sleeping for %s", delay) } }() diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go new file mode 100644 index 00000000000..b81a3703789 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go @@ -0,0 +1,468 @@ +// This package implements a provisioner for Terraform that executes a +// saltstack state within the remote machine +// +// Adapted from github.com/hashicorp/packer/provisioner/salt-masterless + +package saltmasterless + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +type provisionFn func(terraform.UIOutput, communicator.Communicator) error + +type provisioner struct { + SkipBootstrap bool + BootstrapArgs string + LocalStateTree string + DisableSudo bool + CustomState string + MinionConfig string + LocalPillarRoots string + RemoteStateTree string + RemotePillarRoots string + TempConfigDir string + NoExitOnFailure bool + LogLevel string + SaltCallArgs string + CmdArgs string +} + +const DefaultStateTreeDir = "/srv/salt" +const DefaultPillarRootDir = "/srv/pillar"
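// A standalone worked example (annotation, not vendored code) of the retry
// backoff added to retryFunc in the remote-exec provisioner above. All names
// mirror the diff; the delay starts at zero, is seeded with
// initialBackoffDelay after the first retryable error, doubles on each
// subsequent error, and is capped at maxBackoffDelay, so successive waits are
// 0s, 1s, 2s, 4s, 8s, 10s, 10s, ...

package main

import (
	"fmt"
	"time"
)

const (
	initialBackoffDelay = time.Second
	maxBackoffDelay     = 10 * time.Second
)

func main() {
	delay := time.Duration(0)
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("attempt %d waits %s\n", attempt, delay)
		// Same update order as retryFunc: double first, seed if still zero,
		// then clamp to the maximum.
		delay *= 2
		if delay == 0 {
			delay = initialBackoffDelay
		}
		if delay > maxBackoffDelay {
			delay = maxBackoffDelay
		}
	}
}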
+ +// Provisioner returns a salt-masterless provisioner +func Provisioner() terraform.ResourceProvisioner { + return &schema.Provisioner{ + Schema: map[string]*schema.Schema{ + "local_state_tree": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "local_pillar_roots": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "remote_state_tree": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "remote_pillar_roots": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "temp_config_dir": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/tmp/salt", + }, + "skip_bootstrap": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "no_exit_on_failure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "bootstrap_args": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "disable_sudo": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "custom_state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "minion_config_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "cmd_args": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "salt_call_args": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "log_level": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + + ApplyFunc: applyFn, + ValidateFunc: validateFn, + } +} + +// applyFn executes the salt-masterless provisioner +func applyFn(ctx context.Context) error { + // Decode the raw config for this provisioner + var err error + + o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) + d := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) + connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) + + p, err := decodeConfig(d) + if err != nil { + return err + } + + // Get a new communicator + comm, err := communicator.New(connState) + if err != nil { + return err + } + + var src, dst string + + o.Output("Provisioning with Salt...") + if !p.SkipBootstrap { + cmd := &remote.Cmd{ + // Fallback on wget if curl failed for any reason (such as not being installed) + Command: fmt.Sprintf("curl -L https://bootstrap.saltstack.com -o /tmp/install_salt.sh || wget -O /tmp/install_salt.sh https://bootstrap.saltstack.com"), + } + o.Output(fmt.Sprintf("Downloading saltstack bootstrap to /tmp/install_salt.sh")) + if err = comm.Start(cmd); err != nil { + return fmt.Errorf("Unable to download Salt: %s", err) + } + cmd = &remote.Cmd{ + Command: fmt.Sprintf("%s /tmp/install_salt.sh %s", p.sudo("sh"), p.BootstrapArgs), + } + o.Output(fmt.Sprintf("Installing Salt with command %s", cmd.Command)) + if err = comm.Start(cmd); err != nil { + return fmt.Errorf("Unable to install Salt: %s", err) + } + } + + o.Output(fmt.Sprintf("Creating remote temporary directory: %s", p.TempConfigDir)) + if err := p.createDir(o, comm, p.TempConfigDir); err != nil { + return fmt.Errorf("Error creating remote temporary directory: %s", err) + } + + if p.MinionConfig != "" { + o.Output(fmt.Sprintf("Uploading minion config: %s", p.MinionConfig)) + src = p.MinionConfig + dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "minion")) + if err = p.uploadFile(o, comm, dst, src); err != nil { + return fmt.Errorf("Error uploading local minion config file to remote: %s", err) + } + + // move minion config into /etc/salt + o.Output(fmt.Sprintf("Make sure directory %s exists", "/etc/salt")) + if err := p.createDir(o, comm, "/etc/salt"); err != nil { + return
fmt.Errorf("Error creating remote salt configuration directory: %s", err) + } + src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "minion")) + dst = "/etc/salt/minion" + if err = p.moveFile(o, comm, dst, src); err != nil { + return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %s", p.TempConfigDir, err) + } + } + + o.Output(fmt.Sprintf("Uploading local state tree: %s", p.LocalStateTree)) + src = p.LocalStateTree + dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "states")) + if err = p.uploadDir(o, comm, dst, src, []string{".git"}); err != nil { + return fmt.Errorf("Error uploading local state tree to remote: %s", err) + } + + // move state tree from temporary directory + src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "states")) + dst = p.RemoteStateTree + if err = p.removeDir(o, comm, dst); err != nil { + return fmt.Errorf("Unable to clear salt tree: %s", err) + } + if err = p.moveFile(o, comm, dst, src); err != nil { + return fmt.Errorf("Unable to move %s/states to %s: %s", p.TempConfigDir, dst, err) + } + + if p.LocalPillarRoots != "" { + o.Output(fmt.Sprintf("Uploading local pillar roots: %s", p.LocalPillarRoots)) + src = p.LocalPillarRoots + dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "pillar")) + if err = p.uploadDir(o, comm, dst, src, []string{".git"}); err != nil { + return fmt.Errorf("Error uploading local pillar roots to remote: %s", err) + } + + // move pillar root from temporary directory + src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "pillar")) + dst = p.RemotePillarRoots + + if err = p.removeDir(o, comm, dst); err != nil { + return fmt.Errorf("Unable to clear pillar root: %s", err) + } + if err = p.moveFile(o, comm, dst, src); err != nil { + return fmt.Errorf("Unable to move %s/pillar to %s: %s", p.TempConfigDir, dst, err) + } + } + + o.Output(fmt.Sprintf("Running: salt-call --local %s", p.CmdArgs)) + cmd := &remote.Cmd{Command: p.sudo(fmt.Sprintf("salt-call --local %s", p.CmdArgs))} + if err = comm.Start(cmd); err != nil || cmd.ExitStatus != 0 { + if err == nil { + err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) + } + + return fmt.Errorf("Error executing salt-call: %s", err) + } + + return nil +} + +// Prepends sudo to supplied command if config says to +func (p *provisioner) sudo(cmd string) string { + if p.DisableSudo { + return cmd + } + + return "sudo " + cmd +} + +func validateDirConfig(path string, name string, required bool) error { + if required == true && path == "" { + return fmt.Errorf("%s cannot be empty", name) + } else if required == false && path == "" { + return nil + } + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("%s: path '%s' is invalid: %s", name, path, err) + } else if !info.IsDir() { + return fmt.Errorf("%s: path '%s' must point to a directory", name, path) + } + return nil +} + +func validateFileConfig(path string, name string, required bool) error { + if required == true && path == "" { + return fmt.Errorf("%s cannot be empty", name) + } else if required == false && path == "" { + return nil + } + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("%s: path '%s' is invalid: %s", name, path, err) + } else if info.IsDir() { + return fmt.Errorf("%s: path '%s' must point to a file", name, path) + } + return nil +} + +func (p *provisioner) uploadFile(o terraform.UIOutput, comm communicator.Communicator, dst, src string) error { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("Error opening: %s", err) + } + defer f.Close() + + if err = comm.Upload(dst, 
f); err != nil { + return fmt.Errorf("Error uploading %s: %s", src, err) + } + return nil +} + +func (p *provisioner) moveFile(o terraform.UIOutput, comm communicator.Communicator, dst, src string) error { + o.Output(fmt.Sprintf("Moving %s to %s", src, dst)) + cmd := &remote.Cmd{Command: fmt.Sprintf(p.sudo("mv %s %s"), src, dst)} + if err := comm.Start(cmd); err != nil || cmd.ExitStatus != 0 { + if err == nil { + err = fmt.Errorf("Bad exit status: %d", cmd.ExitStatus) + } + + return fmt.Errorf("Unable to move %s to %s: %s", src, dst, err) + } + return nil +} + +func (p *provisioner) createDir(o terraform.UIOutput, comm communicator.Communicator, dir string) error { + o.Output(fmt.Sprintf("Creating directory: %s", dir)) + cmd := &remote.Cmd{ + Command: fmt.Sprintf("mkdir -p '%s'", dir), + } + if err := comm.Start(cmd); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status.") + } + return nil +} + +func (p *provisioner) removeDir(o terraform.UIOutput, comm communicator.Communicator, dir string) error { + o.Output(fmt.Sprintf("Removing directory: %s", dir)) + cmd := &remote.Cmd{ + Command: fmt.Sprintf("rm -rf '%s'", dir), + } + if err := comm.Start(cmd); err != nil { + return err + } + if cmd.ExitStatus != 0 { + return fmt.Errorf("Non-zero exit status.") + } + return nil +} + +func (p *provisioner) uploadDir(o terraform.UIOutput, comm communicator.Communicator, dst, src string, ignore []string) error { + if err := p.createDir(o, comm, dst); err != nil { + return err + } + + // Make sure there is a trailing "/" so that the directory isn't + // created on the other side. + if src[len(src)-1] != '/' { + src = src + "/" + } + return comm.UploadDir(dst, src) +} + +// Validate checks if the required arguments are configured +func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { + // require a salt state tree + localStateTreeTmp, ok := c.Get("local_state_tree") + var localStateTree string + if !ok { + es = append(es, + errors.New("Required local_state_tree is not set")) + } else { + localStateTree = localStateTreeTmp.(string) + } + err := validateDirConfig(localStateTree, "local_state_tree", true) + if err != nil { + es = append(es, err) + } + + var localPillarRoots string + localPillarRootsTmp, ok := c.Get("local_pillar_roots") + if !ok { + localPillarRoots = "" + } else { + localPillarRoots = localPillarRootsTmp.(string) + } + + err = validateDirConfig(localPillarRoots, "local_pillar_roots", false) + if err != nil { + es = append(es, err) + } + + var minionConfig string + minionConfigTmp, ok := c.Get("minion_config_file") + if !ok { + minionConfig = "" + } else { + minionConfig = minionConfigTmp.(string) + } + err = validateFileConfig(minionConfig, "minion_config_file", false) + if err != nil { + es = append(es, err) + } + + var remoteStateTree string + remoteStateTreeTmp, ok := c.Get("remote_state_tree") + if !ok { + remoteStateTree = "" + } else { + remoteStateTree = remoteStateTreeTmp.(string) + } + + var remotePillarRoots string + remotePillarRootsTmp, ok := c.Get("remote_pillar_roots") + if !ok { + remotePillarRoots = "" + } else { + remotePillarRoots = remotePillarRootsTmp.(string) + } + + if minionConfig != "" && (remoteStateTree != "" || remotePillarRoots != "") { + es = append(es, + errors.New("remote_state_tree and remote_pillar_roots only apply when minion_config_file is not used")) + } + + if len(es) > 0 { + return ws, es + } + + return ws, es +} + +func decodeConfig(d *schema.ResourceData) (*provisioner, error) { + p := 
&provisioner{ + LocalStateTree: d.Get("local_state_tree").(string), + LogLevel: d.Get("log_level").(string), + SaltCallArgs: d.Get("salt_call_args").(string), + CmdArgs: d.Get("cmd_args").(string), + MinionConfig: d.Get("minion_config_file").(string), + CustomState: d.Get("custom_state").(string), + DisableSudo: d.Get("disable_sudo").(bool), + BootstrapArgs: d.Get("bootstrap_args").(string), + NoExitOnFailure: d.Get("no_exit_on_failure").(bool), + SkipBootstrap: d.Get("skip_bootstrap").(bool), + TempConfigDir: d.Get("temp_config_dir").(string), + RemotePillarRoots: d.Get("remote_pillar_roots").(string), + RemoteStateTree: d.Get("remote_state_tree").(string), + LocalPillarRoots: d.Get("local_pillar_roots").(string), + } + + // build the command line args to pass onto salt + var cmdArgs bytes.Buffer + + if p.CustomState == "" { + cmdArgs.WriteString(" state.highstate") + } else { + cmdArgs.WriteString(" state.sls ") + cmdArgs.WriteString(p.CustomState) + } + + if p.MinionConfig == "" { + // pass --file-root and --pillar-root if no minion_config_file is supplied + if p.RemoteStateTree != "" { + cmdArgs.WriteString(" --file-root=") + cmdArgs.WriteString(p.RemoteStateTree) + } else { + cmdArgs.WriteString(" --file-root=") + cmdArgs.WriteString(DefaultStateTreeDir) + } + if p.RemotePillarRoots != "" { + cmdArgs.WriteString(" --pillar-root=") + cmdArgs.WriteString(p.RemotePillarRoots) + } else { + cmdArgs.WriteString(" --pillar-root=") + cmdArgs.WriteString(DefaultPillarRootDir) + } + } + + if !p.NoExitOnFailure { + cmdArgs.WriteString(" --retcode-passthrough") + } + + if p.LogLevel == "" { + cmdArgs.WriteString(" -l info") + } else { + cmdArgs.WriteString(" -l ") + cmdArgs.WriteString(p.LogLevel) + } + + if p.SaltCallArgs != "" { + cmdArgs.WriteString(" ") + cmdArgs.WriteString(p.SaltCallArgs) + } + + p.CmdArgs = cmdArgs.String() + + return p, nil +} diff --git a/vendor/github.com/hashicorp/terraform/command/apply.go b/vendor/github.com/hashicorp/terraform/command/apply.go index 94f7d129c63..3bd2c08930b 100644 --- a/vendor/github.com/hashicorp/terraform/command/apply.go +++ b/vendor/github.com/hashicorp/terraform/command/apply.go @@ -8,6 +8,7 @@ import ( "sort" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" @@ -130,7 +131,8 @@ func (c *ApplyCommand) Run(args []string) int { if plan == nil { mod, err = c.Module(configPath) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + err = errwrap.Wrapf("Failed to load root config module: {{err}}", err) + c.showDiagnostics(err) return 1 } } @@ -203,7 +205,7 @@ func (c *ApplyCommand) Run(args []string) int { } case <-op.Done(): if err := op.Err; err != nil { - c.Ui.Error(err.Error()) + c.showDiagnostics(err) return 1 } } diff --git a/vendor/github.com/hashicorp/terraform/command/autocomplete.go b/vendor/github.com/hashicorp/terraform/command/autocomplete.go new file mode 100644 index 00000000000..cc1ad145bd3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/autocomplete.go @@ -0,0 +1,79 @@ +package command + +import ( + "github.com/posener/complete" + "github.com/posener/complete/match" +) + +// This file contains some re-usable predictors for auto-complete. The +// command-specific autocomplete configurations live within each command's +// own source file, as AutocompleteArgs and AutocompleteFlags methods on each +// Command implementation. 
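// Illustrative sketch (hypothetical command, not part of this diff): a nested
// subcommand such as "workspace select" would combine the helpers defined
// below by padding one placeholder per extra subcommand word, followed by the
// real positional predictor:
//
//	func (c *WorkspaceSelectCommand) AutocompleteArgs() complete.Predictor {
//		return completePredictSequence{
//			complete.PredictNothing,          // placeholder for the "select" word
//			c.completePredictWorkspaceName(), // then suggest workspace names
//		}
//	}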
+ +// For completing the value of boolean flags like -foo false +var completePredictBoolean = complete.PredictSet("true", "false") + +// We don't currently have a real predictor for module sources, but +// we'll probably add one later. +var completePredictModuleSource = complete.PredictAnything + +type completePredictSequence []complete.Predictor + +func (s completePredictSequence) Predict(a complete.Args) []string { + // Only one level of command is stripped off the prefix of a.Completed + // here, so nested subcommands like "workspace new" will need to provide + // dummy entries (e.g. complete.PredictNothing) as placeholders for + // all but the first subcommand. For example, "workspace new" needs + // one placeholder for the argument "new". + idx := len(a.Completed) + if idx >= len(s) { + return nil + } + + return s[idx].Predict(a) +} + +func (m *Meta) completePredictWorkspaceName() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + // There are a lot of things that can fail in here, so if we encounter + // any error then we'll just return nothing and not support autocomplete + // until whatever error is fixed. (The user can't actually see the error + // here, but other commands should produce a user-visible error before + // too long.) + + // We assume here that we want to autocomplete for the current working + // directory, since we don't have enough context to know where to + // find any config path argument, and it might be _after_ the argument + // we're trying to complete here anyway. + configPath, err := ModulePath(nil) + if err != nil { + return nil + } + + cfg, err := m.Config(configPath) + if err != nil { + return nil + } + + b, err := m.Backend(&BackendOpts{ + Config: cfg, + }) + if err != nil { + return nil + } + + names, _ := b.States() + + if a.Last != "" { + // filter for names that match the prefix only + filtered := make([]string, 0, len(names)) + for _, name := range names { + if match.Prefix(name, a.Last) { + filtered = append(filtered, name) + } + } + names = filtered + } + return names + }) +} diff --git a/vendor/github.com/hashicorp/terraform/command/fmt.go b/vendor/github.com/hashicorp/terraform/command/fmt.go index 9352cdabff6..9fefd25178d 100644 --- a/vendor/github.com/hashicorp/terraform/command/fmt.go +++ b/vendor/github.com/hashicorp/terraform/command/fmt.go @@ -1,6 +1,7 @@ package command import ( + "bytes" "flag" "fmt" "io" @@ -21,6 +22,7 @@ const ( type FmtCommand struct { Meta opts fmtcmd.Options + check bool input io.Reader // STDIN if nil } @@ -38,6 +40,7 @@ func (c *FmtCommand) Run(args []string) int { cmdFlags.BoolVar(&c.opts.List, "list", true, "list") cmdFlags.BoolVar(&c.opts.Write, "write", true, "write") cmdFlags.BoolVar(&c.opts.Diff, "diff", false, "diff") + cmdFlags.BoolVar(&c.check, "check", false, "check") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { @@ -61,13 +64,37 @@ func (c *FmtCommand) Run(args []string) int { dirs = []string{args[0]} } - output := &cli.UiWriter{Ui: c.Ui} + var output io.Writer + list := c.opts.List // preserve the original value of -list + if c.check { + // set to true so we can use the list output to check + // if the input needs formatting + c.opts.List = true + c.opts.Write = false + output = &bytes.Buffer{} + } else { + output = &cli.UiWriter{Ui: c.Ui} + } + err = fmtcmd.Run(dirs, []string{fileExtension}, c.input, output, c.opts) if err != nil { c.Ui.Error(fmt.Sprintf("Error running fmt: %s", err)) return 2 } + if c.check { + buf := 
output.(*bytes.Buffer) + ok := buf.Len() == 0 + if list { + io.Copy(&cli.UiWriter{Ui: c.Ui}, buf) + } + if ok { + return 0 + } else { + return 3 + } + } + return 0 } @@ -84,10 +111,12 @@ Options: -list=true List files whose formatting differs (always false if using STDIN) - -write=true Write result to source file instead of STDOUT (always false if using STDIN) + -write=true Write result to source file instead of STDOUT (always false if using STDIN or -check) -diff=false Display diffs of formatting changes + -check=false Check if the input is formatted. Exit status will be 0 if all input is properly formatted and non-zero otherwise. + ` return strings.TrimSpace(helpText) } diff --git a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go new file mode 100644 index 00000000000..4c5f7fff086 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go @@ -0,0 +1,65 @@ +package format + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" +) + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + switch diag.Severity() { + case tfdiags.Error: + buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) + case tfdiags.Warning: + buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) + default: + // Clear out any coloring that might be applied by Terraform's UI helper, + // so our result is not context-sensitive. + buf.WriteString(color.Color("\n[reset]")) + } + + desc := diag.Description() + sourceRefs := diag.Source() + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + if sourceRefs.Subject != nil { + fmt.Fprintf(&buf, color.Color("[bold]%s[reset] at %s\n\n"), desc.Summary, sourceRefs.Subject.StartString()) + } else { + fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) + } + + // TODO: also print out the relevant snippet of config source with the + // relevant section highlighted, so the user doesn't need to manually + // correlate back to config. Before we can do this, the HCL2 parser + // needs to be more deeply integrated so that we can use it to obtain + // the parsed source code and AST. 
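// (Annotation, not vendored code: callers are expected to route the returned
// string to the UI by severity; Meta.showDiagnostics in command/meta.go later
// in this diff does exactly that:
//
//	msg := format.Diagnostic(diag, m.Colorize(), 78)
//	m.Ui.Error(msg) // for tfdiags.Error severity
//
// where 78 is a stand-in width until the real terminal width is measured.)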
+ + if desc.Detail != "" { + detail := desc.Detail + if width != 0 { + detail = wordwrap.WrapString(detail, uint(width)) + } + fmt.Fprintf(&buf, "%s\n", detail) + } + + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/plan.go b/vendor/github.com/hashicorp/terraform/command/format/plan.go index 6bc699e398b..65d2c9d2771 100644 --- a/vendor/github.com/hashicorp/terraform/command/format/plan.go +++ b/vendor/github.com/hashicorp/terraform/command/format/plan.go @@ -11,232 +11,339 @@ import ( "github.com/mitchellh/colorstring" ) -// PlanOpts are the options for formatting a plan. -type PlanOpts struct { - // Plan is the plan to format. This is required. - Plan *terraform.Plan +// Plan is a representation of a plan optimized for display to +// an end-user, as opposed to terraform.Plan which is for internal use. +// +// DisplayPlan excludes implementation details that may otherwise appear +// in the main plan, such as destroy actions on data sources (which are +// there only to clean up the state). +type Plan struct { + Resources []*InstanceDiff +} + +// InstanceDiff is a representation of an instance diff optimized +// for display, in conjunction with DisplayPlan. +type InstanceDiff struct { + Addr *terraform.ResourceAddress + Action terraform.DiffChangeType - // Color is the colorizer. This is optional. - Color *colorstring.Colorize + // Attributes describes changes to the attributes of the instance. + // + // For destroy diffs this is always nil. + Attributes []*AttributeDiff - // ModuleDepth is the depth of the modules to expand. By default this - // is zero which will not expand modules at all. - ModuleDepth int + Tainted bool + Deposed bool } -// Plan takes a plan and returns a -func Plan(opts *PlanOpts) string { - p := opts.Plan - if p.Diff == nil || p.Diff.Empty() { - return "This plan does nothing." - } +// AttributeDiff is a representation of an attribute diff optimized +// for display, in conjunction with DisplayInstanceDiff. +type AttributeDiff struct { + // Path is a dot-delimited traversal through possibly many levels of list and map structure, + // intended for display purposes only. + Path string - if opts.Color == nil { - opts.Color = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Reset: false, - } - } + Action terraform.DiffChangeType - buf := new(bytes.Buffer) - for _, m := range p.Diff.Modules { - if len(m.Path)-1 <= opts.ModuleDepth || opts.ModuleDepth == -1 { - formatPlanModuleExpand(buf, m, opts) - } else { - formatPlanModuleSingle(buf, m, opts) - } - } + OldValue string + NewValue string - return strings.TrimSpace(buf.String()) + NewComputed bool + Sensitive bool + ForcesNew bool } -// formatPlanModuleExpand will output the given module and all of its -// resources. -func formatPlanModuleExpand( - buf *bytes.Buffer, m *terraform.ModuleDiff, opts *PlanOpts) { - // Ignore empty diffs - if m.Empty() { - return - } +// PlanStats gives summary counts for a Plan. +type PlanStats struct { + ToAdd, ToChange, ToDestroy int +} - var modulePath []string - if !m.IsRoot() { - modulePath = m.Path[1:] +// NewPlan produces a display-oriented Plan from a terraform.Plan. +func NewPlan(plan *terraform.Plan) *Plan { + ret := &Plan{} + if plan == nil || plan.Diff == nil || plan.Diff.Empty() { + // Nothing to do! + return ret } - // We want to output the resources in sorted order to make things - // easier to scan through, so get all the resource names and sort them. 
- names := make([]string, 0, len(m.Resources)) - addrs := map[string]*terraform.ResourceAddress{} - for name := range m.Resources { - names = append(names, name) - var err error - addrs[name], err = terraform.ParseResourceAddressForInstanceDiff(modulePath, name) - if err != nil { - // should never happen; indicates invalid diff - panic("invalid resource address in diff") + for _, m := range plan.Diff.Modules { + var modulePath []string + if !m.IsRoot() { + // trim off the leading "root" path segment, since it's implied + // when we use a path in a resource address. + modulePath = m.Path[1:] } - } - sort.Slice(names, func(i, j int) bool { - return addrs[names[i]].Less(addrs[names[j]]) - }) - // Go through each sorted name and start building the output - for _, name := range names { - rdiff := m.Resources[name] - if rdiff.Empty() { - continue - } + for k, r := range m.Resources { + if r.Empty() { + continue + } - addr := addrs[name] - addrStr := addr.String() - dataSource := addr.Mode == config.DataResourceMode - - // Determine the color for the text (green for adding, yellow - // for change, red for delete), and symbol, and output the - // resource header. - color := "yellow" - symbol := " ~" - oldValues := true - switch rdiff.ChangeType() { - case terraform.DiffDestroyCreate: - color = "yellow" - symbol = "[red]-[reset]/[green]+[reset][yellow]" - case terraform.DiffCreate: - color = "green" - symbol = " +" - oldValues = false - - // If we're "creating" a data resource then we'll present it - // to the user as a "read" operation, so it's clear that this - // operation won't change anything outside of the Terraform state. - // Unfortunately by the time we get here we only have the name - // to work with, so we need to cheat and exploit knowledge of the - // naming scheme for data resources. - if dataSource { - symbol = " <=" - color = "cyan" + addr, err := terraform.ParseResourceAddressForInstanceDiff(modulePath, k) + if err != nil { + // should never happen; indicates invalid diff + panic("invalid resource address in diff") } - case terraform.DiffDestroy: - color = "red" - symbol = " -" - } - var extraAttr []string - if rdiff.DestroyTainted { - extraAttr = append(extraAttr, "tainted") - } - if rdiff.DestroyDeposed { - extraAttr = append(extraAttr, "deposed") - } - var extraStr string - if len(extraAttr) > 0 { - extraStr = fmt.Sprintf(" (%s)", strings.Join(extraAttr, ", ")) - } - if rdiff.ChangeType() == terraform.DiffDestroyCreate { - extraStr = extraStr + opts.Color.Color(" [red][bold](new resource required)") - } + dataSource := addr.Mode == config.DataResourceMode - buf.WriteString(opts.Color.Color(fmt.Sprintf( - "[%s]%s %s%s\n", - color, symbol, addrStr, extraStr))) - - // Get all the attributes that are changing, and sort them. Also - // determine the longest key so that we can align them all. - keyLen := 0 - keys := make([]string, 0, len(rdiff.Attributes)) - for key, _ := range rdiff.Attributes { - // Skip the ID since we do that specially - if key == "id" { + // We create "destroy" actions for data resources so we can clean + // up their entries in state, but this is an implementation detail + // that users shouldn't see. 
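// (Annotation, not vendored code: concretely, for a data resource such as
// "data.aws_ami.example" — a hypothetical name — the DiffDestroy entry is
// skipped by the check just below, and a DiffCreate is re-labelled
// DiffRefresh further down, so the UI shows a cyan "<=" read instead of
// create/destroy churn.)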
+ if dataSource && r.ChangeType() == terraform.DiffDestroy { continue } - keys = append(keys, key) - if len(key) > keyLen { - keyLen = len(key) + did := &InstanceDiff{ + Addr: addr, + Action: r.ChangeType(), + Tainted: r.DestroyTainted, + Deposed: r.DestroyDeposed, } - } - sort.Strings(keys) - - // Go through and output each attribute - for _, attrK := range keys { - attrDiff := rdiff.Attributes[attrK] - v := attrDiff.New - if v == "" && attrDiff.NewComputed { - v = "" + if dataSource && did.Action == terraform.DiffCreate { + // Use "refresh" as the action for display, since core + // currently uses Create for this. + did.Action = terraform.DiffRefresh } - if attrDiff.Sensitive { - v = "" + ret.Resources = append(ret.Resources, did) + + if did.Action == terraform.DiffDestroy { + // Don't show any outputs for destroy actions + continue } - updateMsg := "" - if attrDiff.RequiresNew && rdiff.Destroy { - updateMsg = opts.Color.Color(" [red](forces new resource)") - } else if attrDiff.Sensitive && oldValues { - updateMsg = opts.Color.Color(" [yellow](attribute changed)") + for k, a := range r.Attributes { + var action terraform.DiffChangeType + switch { + case a.NewRemoved: + action = terraform.DiffDestroy + case did.Action == terraform.DiffCreate: + action = terraform.DiffCreate + default: + action = terraform.DiffUpdate + } + + did.Attributes = append(did.Attributes, &AttributeDiff{ + Path: k, + Action: action, + + OldValue: a.Old, + NewValue: a.New, + + Sensitive: a.Sensitive, + ForcesNew: a.RequiresNew, + NewComputed: a.NewComputed, + }) } - if oldValues { - var u string - if attrDiff.Sensitive { - u = "" - } else { - u = attrDiff.Old + // Sort the attributes by their paths for display + sort.Slice(did.Attributes, func(i, j int) bool { + iPath := did.Attributes[i].Path + jPath := did.Attributes[j].Path + + // as a special case, "id" is always first + switch { + case iPath != jPath && (iPath == "id" || jPath == "id"): + return iPath == "id" + default: + return iPath < jPath } - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } else { - buf.WriteString(fmt.Sprintf( - " %s:%s %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - v, - updateMsg)) + }) + + } + } + + // Sort the instance diffs by their addresses for display. + sort.Slice(ret.Resources, func(i, j int) bool { + iAddr := ret.Resources[i].Addr + jAddr := ret.Resources[j].Addr + return iAddr.Less(jAddr) + }) + + return ret +} + +// Format produces and returns a text representation of the receiving plan +// intended for display in a terminal. +// +// If color is not nil, it is used to colorize the output. +func (p *Plan) Format(color *colorstring.Colorize) string { + if p.Empty() { + return "This plan does nothing." + } + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: false, + } + } + + // Find the longest path length of all the paths that are changing, + // so we can align them all. 
+ keyLen := 0 + for _, r := range p.Resources { + for _, attr := range r.Attributes { + key := attr.Path + + if len(key) > keyLen { + keyLen = len(key) + } + } + } + + buf := new(bytes.Buffer) + for _, r := range p.Resources { + formatPlanInstanceDiff(buf, r, keyLen, color) + } + + return strings.TrimSpace(buf.String()) +} + +// Stats returns statistics about the plan +func (p *Plan) Stats() PlanStats { + var ret PlanStats + for _, r := range p.Resources { + switch r.Action { + case terraform.DiffCreate: + ret.ToAdd++ + case terraform.DiffUpdate: + ret.ToChange++ + case terraform.DiffDestroyCreate: + ret.ToAdd++ + ret.ToDestroy++ + case terraform.DiffDestroy: + ret.ToDestroy++ + } + } + return ret +} - // Write the reset color so we don't overload the user's terminal - buf.WriteString(opts.Color.Color("[reset]\n")) +// ActionCounts returns the number of diffs for each action type +func (p *Plan) ActionCounts() map[terraform.DiffChangeType]int { + ret := map[terraform.DiffChangeType]int{} + for _, r := range p.Resources { + ret[r.Action]++ } + return ret } -// formatPlanModuleSingle will output the given module and all of its -// resources. -func formatPlanModuleSingle( - buf *bytes.Buffer, m *terraform.ModuleDiff, opts *PlanOpts) { - // Ignore empty diffs - if m.Empty() { - return +// Empty returns true if there are no resource diffs in the receiving plan. +func (p *Plan) Empty() bool { + return len(p.Resources) == 0 +} + +// DiffActionSymbol returns a string that, once passed through a +// colorstring.Colorize, will produce a result that can be written +// to a terminal to produce a symbol made of three printable +// characters, possibly interspersed with VT100 color codes. +func DiffActionSymbol(action terraform.DiffChangeType) string { + switch action { + case terraform.DiffDestroyCreate: + return "[red]-[reset]/[green]+[reset]" + case terraform.DiffCreate: + return " [green]+[reset]" + case terraform.DiffDestroy: + return " [red]-[reset]" + case terraform.DiffRefresh: + return " [cyan]<=[reset]" + default: + return " [yellow]~[reset]" } +} - moduleName := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) +// formatPlanInstanceDiff writes the text representation of the given instance diff +// to the given buffer, using the given colorizer. +func formatPlanInstanceDiff(buf *bytes.Buffer, r *InstanceDiff, keyLen int, colorizer *colorstring.Colorize) { + addrStr := r.Addr.String() // Determine the color for the text (green for adding, yellow // for change, red for delete), and symbol, and output the // resource header.
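// (Annotation: the comment above is carried over from the pre-rewrite code;
// with DiffActionSymbol the display now also maps DiffRefresh to a cyan "<="
// read marker, in addition to the green/yellow/red cases the comment lists.)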
color := "yellow" - symbol := "~" - switch m.ChangeType() { + symbol := DiffActionSymbol(r.Action) + oldValues := true + switch r.Action { + case terraform.DiffDestroyCreate: + color = "yellow" case terraform.DiffCreate: color = "green" - symbol = "+" + oldValues = false case terraform.DiffDestroy: color = "red" - symbol = "-" + case terraform.DiffRefresh: + color = "cyan" + oldValues = false + } + + var extraStr string + if r.Tainted { + extraStr = extraStr + " (tainted)" + } + if r.Deposed { + extraStr = extraStr + " (deposed)" + } + if r.Action == terraform.DiffDestroyCreate { + extraStr = extraStr + colorizer.Color(" [red][bold](new resource required)") + } + + buf.WriteString( + colorizer.Color(fmt.Sprintf( + "[%s]%s [%s]%s%s\n", + color, symbol, color, addrStr, extraStr, + )), + ) + + for _, attr := range r.Attributes { + + v := attr.NewValue + var dispV string + switch { + case v == "" && attr.NewComputed: + dispV = "" + case attr.Sensitive: + dispV = "" + default: + dispV = fmt.Sprintf("%q", v) + } + + updateMsg := "" + switch { + case attr.ForcesNew && r.Action == terraform.DiffDestroyCreate: + updateMsg = colorizer.Color(" [red](forces new resource)") + case attr.Sensitive && oldValues: + updateMsg = colorizer.Color(" [yellow](attribute changed)") + } + + if oldValues { + u := attr.OldValue + var dispU string + switch { + case attr.Sensitive: + dispU = "" + default: + dispU = fmt.Sprintf("%q", u) + } + buf.WriteString(fmt.Sprintf( + " %s:%s %s => %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispU, dispV, + updateMsg, + )) + } else { + buf.WriteString(fmt.Sprintf( + " %s:%s %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispV, + updateMsg, + )) + } } - buf.WriteString(opts.Color.Color(fmt.Sprintf( - "[%s]%s %s\n", - color, symbol, moduleName))) - buf.WriteString(fmt.Sprintf( - " %d resource(s)", - len(m.Resources))) - buf.WriteString(opts.Color.Color("[reset]\n")) + // Write the reset color so we don't bleed color into later text + buf.WriteString(colorizer.Color("[reset]\n")) } diff --git a/vendor/github.com/hashicorp/terraform/command/hook_ui.go b/vendor/github.com/hashicorp/terraform/command/hook_ui.go index a53edfa38a9..76b1ca59cff 100644 --- a/vendor/github.com/hashicorp/terraform/command/hook_ui.go +++ b/vendor/github.com/hashicorp/terraform/command/hook_ui.go @@ -65,6 +65,7 @@ func (h *UiHook) PreApply( } id := n.HumanId() + addr := n.ResourceAddress() op := uiResourceModify if d.Destroy { @@ -142,7 +143,7 @@ func (h *UiHook) PreApply( h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: %s%s[reset]%s", - id, + addr, operation, stateIdSuffix, attrString))) @@ -210,6 +211,7 @@ func (h *UiHook) PostApply( applyerr error) (terraform.HookAction, error) { id := n.HumanId() + addr := n.ResourceAddress() h.l.Lock() state := h.resources[id] @@ -243,8 +245,8 @@ func (h *UiHook) PostApply( } colorized := h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: %s%s[reset]", - id, msg, stateIdSuffix)) + "[reset][bold]%s: %s after %s%s[reset]", + addr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix)) h.ui.Output(colorized) @@ -260,10 +262,10 @@ func (h *UiHook) PreDiff( func (h *UiHook) PreProvision( n *terraform.InstanceInfo, provId string) (terraform.HookAction, error) { - id := n.HumanId() + addr := n.ResourceAddress() h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: Provisioning with '%s'...[reset]", - id, provId))) + addr, provId))) return terraform.HookActionContinue, nil } @@ -271,11 +273,11 @@ func 
(h *UiHook) ProvisionOutput( n *terraform.InstanceInfo, provId string, msg string) { - id := n.HumanId() + addr := n.ResourceAddress() var buf bytes.Buffer buf.WriteString(h.Colorize.Color("[reset]")) - prefix := fmt.Sprintf("%s (%s): ", id, provId) + prefix := fmt.Sprintf("%s (%s): ", addr, provId) s := bufio.NewScanner(strings.NewReader(msg)) s.Split(scanLines) for s.Scan() { @@ -293,7 +295,7 @@ func (h *UiHook) PreRefresh( s *terraform.InstanceState) (terraform.HookAction, error) { h.once.Do(h.init) - id := n.HumanId() + addr := n.ResourceAddress() var stateIdSuffix string // Data resources refresh before they have ids, whereas managed @@ -304,7 +306,7 @@ func (h *UiHook) PreRefresh( h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: Refreshing state...%s", - id, stateIdSuffix))) + addr, stateIdSuffix))) return terraform.HookActionContinue, nil } @@ -313,9 +315,10 @@ func (h *UiHook) PreImportState( id string) (terraform.HookAction, error) { h.once.Do(h.init) + addr := n.ResourceAddress() h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: Importing from ID %q...", - n.HumanId(), id))) + addr, id))) return terraform.HookActionContinue, nil } @@ -324,9 +327,9 @@ func (h *UiHook) PostImportState( s []*terraform.InstanceState) (terraform.HookAction, error) { h.once.Do(h.init) - id := n.HumanId() + addr := n.ResourceAddress() h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold][green]%s: Import complete!", id))) + "[reset][bold][green]%s: Import complete!", addr))) for _, s := range s { h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][green] Imported %s (ID: %s)", diff --git a/vendor/github.com/hashicorp/terraform/command/import.go b/vendor/github.com/hashicorp/terraform/command/import.go index b1cc623e448..0dc2003a02b 100644 --- a/vendor/github.com/hashicorp/terraform/command/import.go +++ b/vendor/github.com/hashicorp/terraform/command/import.go @@ -41,6 +41,7 @@ func (c *ImportCommand) Run(args []string) int { cmdFlags.StringVar(&c.Meta.provider, "provider", "", "provider") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.BoolVar(&c.Meta.allowMissingConfig, "allow-missing-config", false, "allow missing config") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 @@ -103,7 +104,7 @@ func (c *ImportCommand) Run(args []string) int { break } } - if rc == nil { + if !c.Meta.allowMissingConfig && rc == nil { modulePath := addr.WholeModuleAddress().String() if modulePath == "" { modulePath = "the root module" @@ -123,15 +124,18 @@ func (c *ImportCommand) Run(args []string) int { // Load the backend b, err := c.Backend(&BackendOpts{ - Config: mod.Config(), - ForceLocal: true, + Config: mod.Config(), }) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) return 1 } - // We require a local backend + // We require a backend.Local to build a context. + // This isn't necessarily a "local.Local" backend, which provides local + // operations, however that is the only current implementation. A + // "local.Local" backend also doesn't necessarily provide local state, as + // that may be delegated to a "remotestate.Backend". 
local, ok := b.(backend.Local) if !ok { c.Ui.Error(ErrUnsupportedLocalOp) @@ -179,6 +183,10 @@ func (c *ImportCommand) Run(args []string) int { c.Ui.Output(c.Colorize().Color("[reset][green]\n" + importCommandSuccessMsg)) + if c.Meta.allowMissingConfig && rc == nil { + c.Ui.Output(c.Colorize().Color("[reset][yellow]\n" + importCommandAllowMissingResourceMsg)) + } + return 0 } @@ -199,11 +207,13 @@ Usage: terraform import [options] ADDR ID determine the ID syntax to use. It typically matches directly to the ID that the provider uses. - In the current state of Terraform import, the resource is only imported - into your state file. Once it is imported, you must manually write - configuration for the new resource or Terraform will mark it for destruction. - Future versions of Terraform will expand the functionality of Terraform - import. + The current implementation of Terraform import can only import resources + into the state. It does not generate configuration. A future version of + Terraform will also generate configuration. + + Because of this, prior to running terraform import it is necessary to write + a resource configuration block for the resource manually, to which the + imported object will be attached. This command will not modify your infrastructure, but it will make network requests to inspect parts of your infrastructure relevant to @@ -211,40 +221,43 @@ Usage: terraform import [options] ADDR ID Options: - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. + -backup=path Path to backup the existing state file before + modifying. Defaults to the "-state-out" path with + ".backup" extension. Set to "-" to disable backup. - -config=path Path to a directory of Terraform configuration files - to use to configure the provider. Defaults to pwd. - If no config files are present, they must be provided - via the input prompts or env vars. + -config=path Path to a directory of Terraform configuration files + to use to configure the provider. Defaults to pwd. + If no config files are present, they must be provided + via the input prompts or env vars. - -input=true Ask for input for variables if not directly set. + -allow-missing-config Allow import when no resource configuration block exists. - -lock=true Lock the state file when locking is supported. + -input=true Ask for input for variables if not directly set. - -lock-timeout=0s Duration to retry a state lock. + -lock=true Lock the state file when locking is supported. - -no-color If specified, output won't contain any color. + -lock-timeout=0s Duration to retry a state lock. - -provider=provider Specific provider to use for import. This is used for - specifying aliases, such as "aws.eu". Defaults to the - normal provider prefix of the resource being imported. + -no-color If specified, output won't contain any color. - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". + -provider=provider Specific provider to use for import. This is used for + specifying aliases, such as "aws.eu". Defaults to the + normal provider prefix of the resource being imported. - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. + -state=PATH Path to the source state file. Defaults to the configured + backend, or "terraform.tfstate" - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. 
This is only useful - with the "-config" flag. + -state-out=PATH Path to the destination state file to write to. If this + isn't specified, the source state file will be used. This + can be a new or existing path. - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" or any ".auto.tfvars" - files are present, they will be automatically loaded. + -var 'foo=bar' Set a variable in the Terraform configuration. This + flag can be set multiple times. This is only useful + with the "-config" flag. + + -var-file=foo Set variables in the Terraform configuration from + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. ` @@ -296,9 +309,13 @@ const importCommandSuccessMsg = `Import successful! The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform. +` + +const importCommandAllowMissingResourceMsg = `Import does not generate resource configuration, you must create a resource +configuration block that matches the current or desired state manually. -Import does not generate configuration, so the next step is to ensure that -the resource configurations match the current (or desired) state of the -imported resources. You can use the output from "terraform plan" to verify that -the configuration is correct and complete. +If there is no matching resource configuration block for the imported +resource, Terraform will delete the resource on the next "terraform apply". +It is recommended that you run "terraform plan" to verify that the +configuration is correct and complete. ` diff --git a/vendor/github.com/hashicorp/terraform/command/init.go b/vendor/github.com/hashicorp/terraform/command/init.go index 427a8ce523f..28e4ffbbb7e 100644 --- a/vendor/github.com/hashicorp/terraform/command/init.go +++ b/vendor/github.com/hashicorp/terraform/command/init.go @@ -7,6 +7,8 @@ import ( "sort" "strings" + "github.com/posener/complete" + "github.com/hashicorp/go-getter" multierror "github.com/hashicorp/go-multierror" @@ -69,12 +71,14 @@ func (c *InitCommand) Run(args []string) int { c.getPlugins = false } - // set getProvider if we don't have a test version already + // set providerInstaller if we don't have a test version already if c.providerInstaller == nil { c.providerInstaller = &discovery.ProviderInstaller{ - Dir: c.pluginDir(), + Dir: c.pluginDir(), + Cache: c.pluginCache(), PluginProtocolVersion: plugin.Handshake.ProtocolVersion, SkipVerify: !flagVerifyPlugins, + Ui: c.Ui, } } @@ -151,8 +155,10 @@ func (c *InitCommand) Run(args []string) int { if flagGet || flagBackend { conf, err := c.Config(path) if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error loading configuration: %s", err)) + // Since this may be the user's first ever interaction with Terraform, + // we'll provide some additional context in this case. + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + c.showDiagnostics(err) return 1 } @@ -256,6 +262,12 @@ func (c *InitCommand) Run(args []string) int { } c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccess))) + if !c.RunningInAutomation { + // If we're not running in an automation wrapper, give the user + // some more detailed next steps that are appropriate for interactive + // shell usage. 
+ c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessCLI))) + } return 0 } @@ -287,6 +299,11 @@ func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade return err } + if err := terraform.CheckRequiredVersion(mod); err != nil { + c.Ui.Error(err.Error()) + return err + } + var available discovery.PluginMetaSet if upgrade { // If we're in upgrade mode, we ignore any auto-installed plugins @@ -310,8 +327,12 @@ func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade var errs error if c.getPlugins { + if len(missing) > 0 { + c.Ui.Output(fmt.Sprintf("- Checking for available provider plugins on %s...", + discovery.GetReleaseHost())) + } + for provider, reqd := range missing { - c.Ui.Output(fmt.Sprintf("- Downloading plugin for provider %q...", provider)) _, err := c.providerInstaller.Get(provider, reqd.Versions) if err != nil { @@ -436,6 +457,29 @@ func (c *InitCommand) getProviders(path string, state *terraform.State, upgrade return nil } +func (c *InitCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictDirs("") +} + +func (c *InitCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-backend": completePredictBoolean, + "-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that + "-force-copy": complete.PredictNothing, + "-from-module": completePredictModuleSource, + "-get": completePredictBoolean, + "-get-plugins": completePredictBoolean, + "-input": completePredictBoolean, + "-lock": completePredictBoolean, + "-lock-timeout": complete.PredictAnything, + "-no-color": complete.PredictNothing, + "-plugin-dir": complete.PredictDirs(""), + "-reconfigure": complete.PredictNothing, + "-upgrade": completePredictBoolean, + "-verify-plugins": completePredictBoolean, + } +} + func (c *InitCommand) Help() string { helpText := ` Usage: terraform init [options] [DIR] @@ -509,6 +553,13 @@ func (c *InitCommand) Synopsis() string { return "Initialize a Terraform working directory" } +const errInitConfigError = ` +There are some problems with the configuration, described below. + +The Terraform configuration must be valid before initialization so that +Terraform can determine which modules and providers need to be installed. +` + const errInitCopyNotEmpty = ` The working directory already contains files. The -from-module option requires an empty directory into which a copy of the referenced module will be placed. @@ -526,7 +577,9 @@ with Terraform immediately by creating Terraform configuration files. const outputInitSuccess = ` [reset][bold][green]Terraform has been successfully initialized![reset][green] +` +const outputInitSuccessCLI = `[reset][green] You may now begin working with Terraform. Try running "terraform plan" to see any changes that are required for your infrastructure. All Terraform commands should now work. 
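// Illustrative sketch (standalone; assumes posener/complete's public API as
// vendored here — in the real CLI this wiring happens through mitchellh/cli
// rather than by hand): how AutocompleteFlags/AutocompleteArgs-style
// predictors like those on InitCommand above plug into a completer.

package main

import "github.com/posener/complete"

func main() {
	// A small subset of InitCommand.AutocompleteFlags above, plus the
	// directory predictor that InitCommand.AutocompleteArgs returns.
	initCmd := complete.Command{
		Flags: complete.Flags{
			"-backend":  complete.PredictSet("true", "false"),
			"-upgrade":  complete.PredictSet("true", "false"),
			"-no-color": complete.PredictNothing,
		},
		Args: complete.PredictDirs(""),
	}

	// Run inspects the COMP_LINE environment variable set by the shell and
	// prints candidate completions.
	complete.New("terraform", complete.Command{
		Sub: complete.Commands{"init": initCmd},
	}).Run()
}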
diff --git a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go index 321fbd09e70..6834bf5f7ea 100644 --- a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go +++ b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go @@ -8,6 +8,7 @@ import ( fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec" remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" + saltmasterlessprovisioner "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless" "github.com/hashicorp/terraform/plugin" ) @@ -15,8 +16,9 @@ import ( var InternalProviders = map[string]plugin.ProviderFunc{} var InternalProvisioners = map[string]plugin.ProvisionerFunc{ - "chef": chefprovisioner.Provisioner, - "file": fileprovisioner.Provisioner, - "local-exec": localexecprovisioner.Provisioner, - "remote-exec": remoteexecprovisioner.Provisioner, + "chef": chefprovisioner.Provisioner, + "file": fileprovisioner.Provisioner, + "local-exec": localexecprovisioner.Provisioner, + "remote-exec": remoteexecprovisioner.Provisioner, + "salt-masterless": saltmasterlessprovisioner.Provisioner, } diff --git a/vendor/github.com/hashicorp/terraform/command/meta.go b/vendor/github.com/hashicorp/terraform/command/meta.go index aca94b1a85c..bb58ab65416 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta.go +++ b/vendor/github.com/hashicorp/terraform/command/meta.go @@ -19,11 +19,15 @@ import ( "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/command/format" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/experiment" "github.com/hashicorp/terraform/helper/variables" "github.com/hashicorp/terraform/helper/wrappedstreams" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" ) @@ -42,6 +46,31 @@ type Meta struct { // ExtraHooks are extra hooks to add to the context. ExtraHooks []terraform.Hook + // Services provides access to remote endpoint information for + // "terraform-native" services running at a specific user-facing hostname. + Services *disco.Disco + + // Credentials provides access to credentials for "terraform-native" + // services, which are accessed by a service hostname. + Credentials auth.CredentialsSource + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint to various command routines that it may be confusing + // to print out messages that suggest running specific follow-up + // commands, since the user consuming the output will not be + // in a position to run such commands. + // + // The intended use-case of this flag is when Terraform is running in + // some sort of workflow orchestration tool which is abstracting away + // the specific commands being run. + RunningInAutomation bool + + // PluginCacheDir, if non-empty, enables caching of downloaded plugins + // into the given directory.
+ PluginCacheDir string + //---------------------------------------------------------- // Protected: commands can set these //---------------------------------------------------------- @@ -129,6 +158,9 @@ type Meta struct { errWriter *io.PipeWriter // done chan to wait for the scanner goroutine errScannerDone chan struct{} + + // Used with the import command to allow import of state when no matching config exists. + allowMissingConfig bool } type PluginOverrides struct { @@ -456,6 +488,36 @@ func (m *Meta) confirm(opts *terraform.InputOpts) (bool, error) { } } +// showDiagnostics displays error and warning messages in the UI. +// +// "Diagnostics" here means the Diagnostics type from the tfdiag package, +// though as a convenience this function accepts anything that could be +// passed to the "Append" method on that type, converting it to Diagnostics +// before displaying it. +// +// Internally this function uses Diagnostics.Append, and so it will panic +// if given unsupported value types, just as Append does. +func (m *Meta) showDiagnostics(vals ...interface{}) { + var diags tfdiags.Diagnostics + diags = diags.Append(vals...) + + for _, diag := range diags { + // TODO: Actually measure the terminal width and pass it here. + // For now, we don't have easy access to the writer that + // ui.Error (etc) are writing to and thus can't interrogate + // to see if it's a terminal and what size it is. + msg := format.Diagnostic(diag, m.Colorize(), 78) + switch diag.Severity() { + case tfdiags.Error: + m.Ui.Error(msg) + case tfdiags.Warning: + m.Ui.Warn(msg) + default: + m.Ui.Output(msg) + } + } +} + const ( // ModuleDepthDefault is the default value for // module depth, which can be overridden by flag diff --git a/vendor/github.com/hashicorp/terraform/command/meta_backend.go b/vendor/github.com/hashicorp/terraform/command/meta_backend.go index 74422e1dc9f..4a52ef00ec3 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta_backend.go +++ b/vendor/github.com/hashicorp/terraform/command/meta_backend.go @@ -96,13 +96,14 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, error) { // Setup the CLI opts we pass into backends that support it cliOpts := &backend.CLIOpts{ - CLI: m.Ui, - CLIColor: m.Colorize(), - StatePath: m.statePath, - StateOutPath: m.stateOutPath, - StateBackupPath: m.backupPath, - ContextOpts: m.contextOpts(), - Input: m.Input(), + CLI: m.Ui, + CLIColor: m.Colorize(), + StatePath: m.statePath, + StateOutPath: m.stateOutPath, + StateBackupPath: m.backupPath, + ContextOpts: m.contextOpts(), + Input: m.Input(), + RunningInAutomation: m.RunningInAutomation, } // Don't validate if we have a plan. 
Validation is normally harmless here, diff --git a/vendor/github.com/hashicorp/terraform/command/meta_new.go b/vendor/github.com/hashicorp/terraform/command/meta_new.go index a1d376b34d5..9447caf3b89 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta_new.go +++ b/vendor/github.com/hashicorp/terraform/command/meta_new.go @@ -47,7 +47,7 @@ func (m *Meta) Module(path string) (*module.Tree, error) { err = mod.Load(m.moduleStorage(m.DataDir()), module.GetModeNone) if err != nil { - return nil, fmt.Errorf("Error loading modules: %s", err) + return nil, errwrap.Wrapf("Error loading modules: {{err}}", err) } return mod, nil diff --git a/vendor/github.com/hashicorp/terraform/command/plan.go b/vendor/github.com/hashicorp/terraform/command/plan.go index f3536519774..757984f8fac 100644 --- a/vendor/github.com/hashicorp/terraform/command/plan.go +++ b/vendor/github.com/hashicorp/terraform/command/plan.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" @@ -73,7 +74,8 @@ func (c *PlanCommand) Run(args []string) int { if plan == nil { mod, err = c.Module(configPath) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + err = errwrap.Wrapf("Failed to load root config module: {{err}}", err) + c.showDiagnostics(err) return 1 } } @@ -111,7 +113,7 @@ func (c *PlanCommand) Run(args []string) int { // Wait for the operation to complete <-op.Done() if err := op.Err; err != nil { - c.Ui.Error(err.Error()) + c.showDiagnostics(err) return 1 } diff --git a/vendor/github.com/hashicorp/terraform/command/plugins.go b/vendor/github.com/hashicorp/terraform/command/plugins.go index ca94f07b937..ba034a2a904 100644 --- a/vendor/github.com/hashicorp/terraform/command/plugins.go +++ b/vendor/github.com/hashicorp/terraform/command/plugins.go @@ -168,10 +168,27 @@ func (m *Meta) pluginDirs(includeAutoInstalled bool) []string { return dirs } +func (m *Meta) pluginCache() discovery.PluginCache { + dir := m.PluginCacheDir + if dir == "" { + return nil // cache disabled + } + + dir = filepath.Join(dir, pluginMachineName) + + return discovery.NewLocalPluginCache(dir) +} + // providerPluginSet returns the set of valid providers that were discovered in // the defined search paths. func (m *Meta) providerPluginSet() discovery.PluginMetaSet { plugins := discovery.FindPlugins("provider", m.pluginDirs(true)) + + // Add providers defined in the legacy .terraformrc, + if m.PluginOverrides != nil { + plugins = plugins.OverridePaths(m.PluginOverrides.Providers) + } + plugins, _ = plugins.ValidateVersions() for p := range plugins { @@ -198,6 +215,12 @@ func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet { // in all locations *except* the auto-install directory. 
func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet { plugins := discovery.FindPlugins("provider", m.pluginDirs(false)) + + // Add providers defined in the legacy .terraformrc, + if m.PluginOverrides != nil { + plugins = plugins.OverridePaths(m.PluginOverrides.Providers) + } + plugins, _ = plugins.ValidateVersions() for p := range plugins { diff --git a/vendor/github.com/hashicorp/terraform/command/push.go b/vendor/github.com/hashicorp/terraform/command/push.go index bad5a7ac4ef..1cced2d91a5 100644 --- a/vendor/github.com/hashicorp/terraform/command/push.go +++ b/vendor/github.com/hashicorp/terraform/command/push.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/atlas-go/v1" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/version" ) type PushCommand struct { @@ -174,7 +174,7 @@ func (c *PushCommand) Run(args []string) int { } } - client.DefaultHeader.Set(terraform.VersionHeader, terraform.Version) + client.DefaultHeader.Set(version.Header, version.Version) if atlasToken != "" { client.Token = atlasToken diff --git a/vendor/github.com/hashicorp/terraform/command/refresh.go b/vendor/github.com/hashicorp/terraform/command/refresh.go index 1949b54fe46..f4b9921f148 100644 --- a/vendor/github.com/hashicorp/terraform/command/refresh.go +++ b/vendor/github.com/hashicorp/terraform/command/refresh.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" @@ -43,7 +44,8 @@ func (c *RefreshCommand) Run(args []string) int { // Load the module mod, err := c.Module(configPath) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + err = errwrap.Wrapf("Failed to load root config module: {{err}}", err) + c.showDiagnostics(err) return 1 } @@ -82,7 +84,7 @@ func (c *RefreshCommand) Run(args []string) int { // Wait for the operation to complete <-op.Done() if err := op.Err; err != nil { - c.Ui.Error(err.Error()) + c.showDiagnostics(err) return 1 } diff --git a/vendor/github.com/hashicorp/terraform/command/show.go b/vendor/github.com/hashicorp/terraform/command/show.go index 254644863be..d8d1c84efef 100644 --- a/vendor/github.com/hashicorp/terraform/command/show.go +++ b/vendor/github.com/hashicorp/terraform/command/show.go @@ -110,11 +110,8 @@ func (c *ShowCommand) Run(args []string) int { } if plan != nil { - c.Ui.Output(format.Plan(&format.PlanOpts{ - Plan: plan, - Color: c.Colorize(), - ModuleDepth: moduleDepth, - })) + dispPlan := format.NewPlan(plan) + c.Ui.Output(dispPlan.Format(c.Colorize())) return 0 } diff --git a/vendor/github.com/hashicorp/terraform/command/state_meta.go b/vendor/github.com/hashicorp/terraform/command/state_meta.go index dc17fa0734a..aa79e9d47e7 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_meta.go +++ b/vendor/github.com/hashicorp/terraform/command/state_meta.go @@ -11,30 +11,32 @@ import ( ) // StateMeta is the meta struct that should be embedded in state subcommands. -type StateMeta struct{} +type StateMeta struct { + Meta +} // State returns the state for this meta. This gets the appropriate state from // the backend, but changes the way that backups are done. This configures // backups to be timestamped rather than just the original state path plus a // backup path. 
-func (c *StateMeta) State(m *Meta) (state.State, error) { +func (c *StateMeta) State() (state.State, error) { var realState state.State - backupPath := m.backupPath - stateOutPath := m.statePath + backupPath := c.backupPath + stateOutPath := c.statePath // use the specified state - if m.statePath != "" { + if c.statePath != "" { realState = &state.LocalState{ - Path: m.statePath, + Path: c.statePath, } } else { // Load the backend - b, err := m.Backend(nil) + b, err := c.Backend(nil) if err != nil { return nil, err } - env := m.Workspace() + env := c.Workspace() // Get the state s, err := b.State(env) if err != nil { @@ -42,7 +44,7 @@ func (c *StateMeta) State(m *Meta) (state.State, error) { } // Get a local backend - localRaw, err := m.Backend(&BackendOpts{ForceLocal: true}) + localRaw, err := c.Backend(&BackendOpts{ForceLocal: true}) if err != nil { // This should never fail panic(err) diff --git a/vendor/github.com/hashicorp/terraform/command/state_mv.go b/vendor/github.com/hashicorp/terraform/command/state_mv.go index 5c51f2da751..e2f89c9639a 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_mv.go +++ b/vendor/github.com/hashicorp/terraform/command/state_mv.go @@ -10,7 +10,6 @@ import ( // StateMvCommand is a Command implementation that shows a single resource. type StateMvCommand struct { - Meta StateMeta } @@ -21,12 +20,13 @@ func (c *StateMvCommand) Run(args []string) int { } // We create two metas to track the two states - var meta1, meta2 Meta + var backupPathOut, statePathOut string + cmdFlags := c.Meta.flagSet("state mv") - cmdFlags.StringVar(&meta1.backupPath, "backup", "-", "backup") - cmdFlags.StringVar(&meta1.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&meta2.backupPath, "backup-out", "-", "backup") - cmdFlags.StringVar(&meta2.statePath, "state-out", "", "path") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.StringVar(&c.statePath, "state", "", "path") + cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") + cmdFlags.StringVar(&statePathOut, "state-out", "", "path") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } @@ -36,16 +36,11 @@ func (c *StateMvCommand) Run(args []string) int { return cli.RunResultHelp } - // Copy the `-state` flag for output if we weren't given a custom one - if meta2.statePath == "" { - meta2.statePath = meta1.statePath - } - // Read the from state - stateFrom, err := c.StateMeta.State(&meta1) + stateFrom, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return cli.RunResultHelp + return 1 } if err := stateFrom.RefreshState(); err != nil { @@ -62,11 +57,14 @@ func (c *StateMvCommand) Run(args []string) int { // Read the destination state stateTo := stateFrom stateToReal := stateFromReal - if meta2.statePath != meta1.statePath { - stateTo, err = c.StateMeta.State(&meta2) + + if statePathOut != "" { + c.statePath = statePathOut + c.backupPath = backupPathOut + stateTo, err = c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return cli.RunResultHelp + return 1 } if err := stateTo.RefreshState(); err != nil { @@ -185,28 +183,30 @@ func (c *StateMvCommand) addableResult(results []*terraform.StateFilterResult) i func (c *StateMvCommand) Help() string { helpText := ` -Usage: terraform state mv [options] ADDRESS ADDRESS +Usage: terraform state mv [options] SOURCE DESTINATION - Move an item in the state to another location or to a completely different - state file. 
+ This command will move an item matched by the address given to the + destination address. This command can also move to a destination address + in a completely different state file. - This command is useful for module refactors (moving items into a module), - configuration refactors (moving items to a completely different or new - state file), or generally renaming of resources. + This can be used for simple resource renaming, moving items to and from + a module, moving entire modules, and more. And because this command can also + move data to a completely new state, it can also be used for refactoring + one configuration into multiple separately managed Terraform configurations. - This command creates a timestamped backup of the state on every invocation. - This can't be disabled. Due to the destructive nature of this command, - the backup is ensured by Terraform for safety reasons. + This command will output a backup copy of the state prior to saving any + changes. The backup cannot be disabled. Due to the destructive nature + of this command, backups are required. - If you're moving from one state file to a different state file, a backup - will be created for each state file. + If you're moving an item to a different state file, a backup will be created + for each state file. Options: -backup=PATH Path where Terraform should write the backup for the original state. This can't be disabled. If not set, Terraform will write it to the same path as the statefile with - a backup extension. + a ".backup" extension. -backup-out=PATH Path where Terraform should write the backup for the destination state. This can't be disabled. If not set, Terraform @@ -215,13 +215,12 @@ Options: to be specified if -state-out is set to a different path than -state. - -state=PATH Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. + -state=PATH Path to the source state file. Defaults to the configured + backend, or "terraform.tfstate" - -state-out=PATH Path to the destination state file to move the item - to. This defaults to the same statefile. This will - overwrite the destination state file. + -state-out=PATH Path to the destination state file to write to. If this + isn't specified, the source state file will be used. This + can be a new or existing path. ` return strings.TrimSpace(helpText) diff --git a/vendor/github.com/hashicorp/terraform/command/state_rm.go b/vendor/github.com/hashicorp/terraform/command/state_rm.go index 40dc870509b..53bb50d01f1 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_rm.go +++ b/vendor/github.com/hashicorp/terraform/command/state_rm.go @@ -9,7 +9,6 @@ import ( // StateRmCommand is a Command implementation that shows a single resource. 
type StateRmCommand struct { - Meta StateMeta } @@ -20,8 +19,8 @@ func (c *StateRmCommand) Run(args []string) int { } cmdFlags := c.Meta.flagSet("state show") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "-", "backup") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.StringVar(&c.statePath, "state", "", "path") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } @@ -32,10 +31,10 @@ func (c *StateRmCommand) Run(args []string) int { return 1 } - state, err := c.StateMeta.State(&c.Meta) + state, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return cli.RunResultHelp + return 1 } if err := state.RefreshState(); err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) @@ -53,6 +52,8 @@ func (c *StateRmCommand) Run(args []string) int { return 1 } + c.Ui.Output(fmt.Sprintf("%d items removed.", len(args))) + if err := state.WriteState(stateReal); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 @@ -88,9 +89,8 @@ Options: will write it to the same path as the statefile with a backup extension. - -state=statefile Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. + -state=PATH Path to the source state file. Defaults to the configured + backend, or "terraform.tfstate" ` return strings.TrimSpace(helpText) diff --git a/vendor/github.com/hashicorp/terraform/command/validate.go b/vendor/github.com/hashicorp/terraform/command/validate.go index 04ae2dddb30..b6aff5a95f9 100644 --- a/vendor/github.com/hashicorp/terraform/command/validate.go +++ b/vendor/github.com/hashicorp/terraform/command/validate.go @@ -17,7 +17,7 @@ type ValidateCommand struct { const defaultPath = "." 
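Note: the state_mv and state_rm hunks above drop each command's own Meta field in favor of an embedded StateMeta that now itself embeds Meta, so fields like statePath resolve through Go field promotion and State() no longer needs a *Meta parameter. A minimal sketch of that embedding pattern, using stand-in types rather than the real command.Meta:

package main

import "fmt"

// Meta stands in for command.Meta: plumbing shared by every command.
type Meta struct {
	statePath string
}

func (m *Meta) Workspace() string { return "default" }

// StateMeta embeds Meta, mirroring the diff's move from
// "type StateMeta struct{}" to "type StateMeta struct { Meta }".
type StateMeta struct {
	Meta
}

// State reads paths from the embedded Meta directly, as in the new
// "func (c *StateMeta) State()" signature.
func (c *StateMeta) State() string {
	if c.statePath != "" {
		return c.statePath
	}
	return "backend state for workspace " + c.Workspace()
}

// StateRmCommand embeds StateMeta instead of carrying its own Meta, so
// Meta's fields are promoted through two levels of embedding.
type StateRmCommand struct {
	StateMeta
}

func main() {
	var cmd StateRmCommand
	cmd.statePath = "terraform.tfstate" // promoted Meta field
	fmt.Println(cmd.State())
}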
func (c *ValidateCommand) Run(args []string) int { - args, err := c.Meta.process(args, false) + args, err := c.Meta.process(args, true) if err != nil { return 1 } @@ -46,6 +46,12 @@ func (c *ValidateCommand) Run(args []string) int { "Unable to locate directory %v\n", err.Error())) } + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + rtnCode := c.validate(dir, checkVars) return rtnCode @@ -91,21 +97,19 @@ Options: func (c *ValidateCommand) validate(dir string, checkVars bool) int { cfg, err := config.LoadDir(dir) if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error loading files %v\n", err.Error())) + c.showDiagnostics(err) return 1 } err = cfg.Validate() if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error validating: %v\n", err.Error())) + c.showDiagnostics(err) return 1 } if checkVars { mod, err := c.Module(dir) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + c.showDiagnostics(err) return 1 } @@ -114,7 +118,7 @@ func (c *ValidateCommand) validate(dir string, checkVars bool) int { tfCtx, err := terraform.NewContext(opts) if err != nil { - c.Ui.Error(fmt.Sprintf("Error: %v\n", err.Error())) + c.showDiagnostics(err) return 1 } diff --git a/vendor/github.com/hashicorp/terraform/command/version.go b/vendor/github.com/hashicorp/terraform/command/version.go index f727667f6ec..b62da93bfca 100644 --- a/vendor/github.com/hashicorp/terraform/command/version.go +++ b/vendor/github.com/hashicorp/terraform/command/version.go @@ -3,6 +3,7 @@ package command import ( "bytes" "fmt" + "sort" ) // VersionCommand is a Command implementation prints the version. @@ -50,6 +51,50 @@ func (c *VersionCommand) Run(args []string) int { c.Ui.Output(versionString.String()) + // We'll also attempt to print out the selected plugin versions. We can + // do this only if "terraform init" was already run and thus we've committed + // to a specific set of plugins. If not, the plugins lock will be empty + // and so we'll show _no_ providers. + // + // Generally-speaking this is a best-effort thing that will give us a good + // result in the usual case where the user successfully ran "terraform init" + // and then hit a problem running _another_ command. + providerPlugins := c.providerPluginSet() + pluginsLockFile := c.providerPluginsLock() + pluginsLock := pluginsLockFile.Read() + var pluginVersions []string + for meta := range providerPlugins { + name := meta.Name + wantHash, wanted := pluginsLock[name] + if !wanted { + // Ignore providers that aren't used by the current config at all + continue + } + gotHash, err := meta.SHA256() + if err != nil { + // if we can't read the file to hash it, ignore it. + continue + } + if !bytes.Equal(gotHash, wantHash) { + // Not the plugin we've locked, so ignore it. + continue + } + + // If we get here then we've found a selected plugin, so we'll print + // out its details. + if meta.Version == "0.0.0" { + pluginVersions = append(pluginVersions, fmt.Sprintf("+ provider.%s (unversioned)", name)) + } else { + pluginVersions = append(pluginVersions, fmt.Sprintf("+ provider.%s v%s", name, meta.Version)) + } + } + if len(pluginVersions) != 0 { + sort.Strings(pluginVersions) + for _, str := range pluginVersions { + c.Ui.Output(str) + } + } + // If we have a version check function, then let's check for // the latest version as well. 
if c.CheckFunc != nil { @@ -65,7 +110,7 @@ func (c *VersionCommand) Run(args []string) int { if info.Outdated { c.Ui.Output(fmt.Sprintf( "Your version of Terraform is out of date! The latest version\n"+ - "is %s. You can update by downloading from www.terraform.io", + "is %s. You can update by downloading from www.terraform.io/downloads.html", info.Latest)) } } diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_delete.go b/vendor/github.com/hashicorp/terraform/command/workspace_delete.go index cb96fba5e48..99dac86d3b2 100644 --- a/vendor/github.com/hashicorp/terraform/command/workspace_delete.go +++ b/vendor/github.com/hashicorp/terraform/command/workspace_delete.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/state" "github.com/mitchellh/cli" + "github.com/posener/complete" ) type WorkspaceDeleteCommand struct { @@ -156,6 +157,21 @@ func (c *WorkspaceDeleteCommand) Run(args []string) int { return 0 } + +func (c *WorkspaceDeleteCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + complete.PredictNothing, // the "delete" subcommand itself (already matched) + c.completePredictWorkspaceName(), + complete.PredictDirs(""), + } +} + +func (c *WorkspaceDeleteCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-force": complete.PredictNothing, + } +} + func (c *WorkspaceDeleteCommand) Help() string { helpText := ` Usage: terraform workspace delete [OPTIONS] NAME [DIR] diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_list.go b/vendor/github.com/hashicorp/terraform/command/workspace_list.go index bd00246eb4a..da7f9b19c6b 100644 --- a/vendor/github.com/hashicorp/terraform/command/workspace_list.go +++ b/vendor/github.com/hashicorp/terraform/command/workspace_list.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" "strings" + + "github.com/posener/complete" ) type WorkspaceListCommand struct { @@ -75,6 +77,14 @@ func (c *WorkspaceListCommand) Run(args []string) int { return 0 } +func (c *WorkspaceListCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictDirs("") +} + +func (c *WorkspaceListCommand) AutocompleteFlags() complete.Flags { + return nil +} + func (c *WorkspaceListCommand) Help() string { helpText := ` Usage: terraform workspace list [DIR] diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_new.go b/vendor/github.com/hashicorp/terraform/command/workspace_new.go index af7ad2265fe..71c9fdc1fcc 100644 --- a/vendor/github.com/hashicorp/terraform/command/workspace_new.go +++ b/vendor/github.com/hashicorp/terraform/command/workspace_new.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" + "github.com/posener/complete" ) type WorkspaceNewCommand struct { @@ -156,6 +157,20 @@ func (c *WorkspaceNewCommand) Run(args []string) int { return 0 } +func (c *WorkspaceNewCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + complete.PredictNothing, // the "new" subcommand itself (already matched) + complete.PredictAnything, + complete.PredictDirs(""), + } +} + +func (c *WorkspaceNewCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-state": complete.PredictFiles("*.tfstate"), + } +} + func (c *WorkspaceNewCommand) Help() string { helpText := ` Usage: terraform workspace new [OPTIONS] NAME [DIR] diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_select.go
b/vendor/github.com/hashicorp/terraform/command/workspace_select.go index a80c7f68b18..7070cc6116f 100644 --- a/vendor/github.com/hashicorp/terraform/command/workspace_select.go +++ b/vendor/github.com/hashicorp/terraform/command/workspace_select.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/mitchellh/cli" + "github.com/posener/complete" ) type WorkspaceSelectCommand struct { @@ -103,6 +104,18 @@ func (c *WorkspaceSelectCommand) Run(args []string) int { return 0 } +func (c *WorkspaceSelectCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + complete.PredictNothing, // the "select" subcommand itself (already matched) + c.completePredictWorkspaceName(), + complete.PredictDirs(""), + } +} + +func (c *WorkspaceSelectCommand) AutocompleteFlags() complete.Flags { + return nil +} + func (c *WorkspaceSelectCommand) Help() string { helpText := ` Usage: terraform workspace select NAME [DIR] diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_show.go b/vendor/github.com/hashicorp/terraform/command/workspace_show.go index 0e4d0159b60..cca688d780c 100644 --- a/vendor/github.com/hashicorp/terraform/command/workspace_show.go +++ b/vendor/github.com/hashicorp/terraform/command/workspace_show.go @@ -2,6 +2,8 @@ package command import ( "strings" + + "github.com/posener/complete" ) type WorkspaceShowCommand struct { @@ -26,6 +28,14 @@ func (c *WorkspaceShowCommand) Run(args []string) int { return 0 } +func (c *WorkspaceShowCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *WorkspaceShowCommand) AutocompleteFlags() complete.Flags { + return nil +} + func (c *WorkspaceShowCommand) Help() string { helpText := ` Usage: terraform workspace show diff --git a/vendor/github.com/hashicorp/terraform/commands.go b/vendor/github.com/hashicorp/terraform/commands.go index 85f1794b9d6..d5e53cf6f7e 100644 --- a/vendor/github.com/hashicorp/terraform/commands.go +++ b/vendor/github.com/hashicorp/terraform/commands.go @@ -1,13 +1,23 @@ package main import ( + "log" "os" "os/signal" "github.com/hashicorp/terraform/command" + pluginDiscovery "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" "github.com/mitchellh/cli" ) +// runningInAutomationEnvName gives the name of an environment variable that +// can be set to any non-empty value in order to suppress certain messages +// that assume that Terraform is being run from a command prompt. +const runningInAutomationEnvName = "TF_IN_AUTOMATION" + // Commands is the mapping of all the available Terraform commands. 
var Commands map[string]cli.CommandFactory var PlumbingCommands map[string]struct{} @@ -20,20 +30,27 @@ const ( OutputPrefix = "o:" ) -func init() { - Ui = &cli.PrefixedUi{ - AskPrefix: OutputPrefix, - OutputPrefix: OutputPrefix, - InfoPrefix: OutputPrefix, - ErrorPrefix: ErrorPrefix, - Ui: &cli.BasicUi{Writer: os.Stdout}, +func initCommands(config *Config) { + var inAutomation bool + if v := os.Getenv(runningInAutomationEnvName); v != "" { + inAutomation = true } + credsSrc := credentialsSource(config) + services := disco.NewDisco() + services.SetCredentialsSource(credsSrc) + meta := command.Meta{ Color: true, GlobalPluginDirs: globalPluginDirs(), PluginOverrides: &PluginOverrides, Ui: Ui, + + Services: services, + Credentials: credsSrc, + + RunningInAutomation: inAutomation, + PluginCacheDir: config.PluginCacheDir, } // The command list is included in the terraform -help @@ -276,13 +293,17 @@ func init() { "state rm": func() (cli.Command, error) { return &command.StateRmCommand{ - Meta: meta, + StateMeta: command.StateMeta{ + Meta: meta, + }, }, nil }, "state mv": func() (cli.Command, error) { return &command.StateMvCommand{ - Meta: meta, + StateMeta: command.StateMeta{ + Meta: meta, + }, }, nil }, @@ -323,3 +344,45 @@ func makeShutdownCh() <-chan struct{} { return resultCh } + +func credentialsSource(config *Config) auth.CredentialsSource { + creds := auth.NoCredentials + if len(config.Credentials) > 0 { + staticTable := map[svchost.Hostname]map[string]interface{}{} + for userHost, creds := range config.Credentials { + host, err := svchost.ForComparison(userHost) + if err != nil { + // We expect the config was already validated by the time we get + // here, so we'll just ignore invalid hostnames. + continue + } + staticTable[host] = creds + } + creds = auth.StaticCredentialsSource(staticTable) + } + + for helperType, helperConfig := range config.CredentialsHelpers { + log.Printf("[DEBUG] Searching for credentials helper named %q", helperType) + available := pluginDiscovery.FindPlugins("credentials", globalPluginDirs()) + available = available.WithName(helperType) + if available.Count() == 0 { + log.Printf("[ERROR] Unable to find credentials helper %q; ignoring", helperType) + break + } + + selected := available.Newest() + + helperSource := auth.HelperProgramCredentialsSource(selected.Path, helperConfig.Args...) + creds = auth.Credentials{ + creds, + auth.CachingCredentialsSource(helperSource), // cached because external operation may be slow/expensive + } + + // There should only be zero or one "credentials_helper" blocks. We + // assume that the config was validated earlier and so we don't check + // for extras here. + break + } + + return creds +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go b/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go index 491a62c3fc4..4ad67aefcd2 100644 --- a/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go +++ b/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go @@ -467,7 +467,7 @@ func (c *Communicator) scpSession(scpCommand string, f func(io.Writer, *bufio.Re if exitErr, ok := err.(*ssh.ExitError); ok { // Otherwise, we have an ExitError, meaning we can just read // the exit status - log.Printf("non-zero exit status: %d", exitErr.ExitStatus()) + log.Printf(exitErr.String()) // If we exited with status 127, it means SCP isn't available. // Return a more descriptive error for that.
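Note: the credentialsSource function above builds a layered auth.CredentialsSource: the static table from "credentials" blocks in the CLI config is consulted first, then an external helper program wrapped in a caching source. A simplified standalone sketch of that layered lookup; the interface below is an illustration, not the real svchost/auth API:

package main

import "fmt"

// credentialsSource is a simplified stand-in for auth.CredentialsSource:
// a source may or may not hold credentials for a given hostname.
type credentialsSource interface {
	ForHost(host string) (map[string]interface{}, bool)
}

// staticSource mimics auth.StaticCredentialsSource built from the
// "credentials" blocks in the CLI config.
type staticSource map[string]map[string]interface{}

func (s staticSource) ForHost(host string) (map[string]interface{}, bool) {
	creds, ok := s[host]
	return creds, ok
}

// chain tries each source in order, the way auth.Credentials layers the
// static table over the (cached) helper-program source.
type chain []credentialsSource

func (c chain) ForHost(host string) (map[string]interface{}, bool) {
	for _, src := range c {
		if creds, ok := src.ForHost(host); ok {
			return creds, true
		}
	}
	return nil, false
}

func main() {
	sources := chain{
		staticSource{"app.terraform.io": {"token": "redacted"}},
		// a caching wrapper around an external helper would follow here
	}
	creds, ok := sources.ForHost("app.terraform.io")
	fmt.Println(creds, ok)
}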
diff --git a/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go b/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go index c25c2bc63a8..729e6eb51b7 100644 --- a/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go +++ b/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform/communicator/remote" "github.com/hashicorp/terraform/terraform" - "github.com/masterzen/winrm/winrm" + "github.com/masterzen/winrm" "github.com/packer-community/winrmcp/winrmcp" // This import is a bit strange, but it's needed so `make updatedeps` can see and download it @@ -39,7 +39,9 @@ func New(s *terraform.InstanceState) (*Communicator, error) { Port: connInfo.Port, HTTPS: connInfo.HTTPS, Insecure: connInfo.Insecure, - CACert: connInfo.CACert, + } + if len(connInfo.CACert) > 0 { + endpoint.CACert = []byte(connInfo.CACert) } comm := &Communicator{ @@ -58,7 +60,7 @@ func (c *Communicator) Connect(o terraform.UIOutput) error { return nil } - params := winrm.DefaultParameters() + params := winrm.DefaultParameters params.Timeout = formatDuration(c.Timeout()) client, err := winrm.NewClientWithParameters( @@ -83,7 +85,7 @@ func (c *Communicator) Connect(o terraform.UIOutput) error { c.connInfo.Password != "", c.connInfo.HTTPS, c.connInfo.Insecure, - c.connInfo.CACert != nil, + c.connInfo.CACert != "", )) } @@ -208,8 +210,8 @@ func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { MaxOperationsPerShell: 15, // lowest common denominator } - if c.connInfo.CACert != nil { - config.CACertBytes = *c.connInfo.CACert + if c.connInfo.CACert != "" { + config.CACertBytes = []byte(c.connInfo.CACert) } return winrmcp.New(addr, &config) diff --git a/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go b/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go index 2dab1e97d4a..1482042456b 100644 --- a/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go +++ b/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go @@ -37,7 +37,7 @@ type connectionInfo struct { Port int HTTPS bool Insecure bool - CACert *[]byte `mapstructure:"ca_cert"` + CACert string `mapstructure:"cacert"` Timeout string ScriptPath string `mapstructure:"script_path"` TimeoutVal time.Duration `mapstructure:"-"` diff --git a/vendor/github.com/hashicorp/terraform/config.go b/vendor/github.com/hashicorp/terraform/config.go index d9391c3cecf..3c546b876e8 100644 --- a/vendor/github.com/hashicorp/terraform/config.go +++ b/vendor/github.com/hashicorp/terraform/config.go @@ -6,11 +6,17 @@ import ( "io/ioutil" "log" "os" + "path/filepath" "github.com/hashicorp/hcl" + "github.com/hashicorp/terraform/command" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/tfdiags" ) +const pluginCacheDirEnvVar = "TF_PLUGIN_CACHE_DIR" + // Config is the structure of the configuration for the Terraform CLI. // // This is not the configuration for Terraform itself. That is in the @@ -21,6 +27,19 @@ type Config struct { DisableCheckpoint bool `hcl:"disable_checkpoint"` DisableCheckpointSignature bool `hcl:"disable_checkpoint_signature"` + + // If set, enables local caching of plugins in this directory to + // avoid repeatedly re-downloading over the Internet. 
+ PluginCacheDir string `hcl:"plugin_cache_dir"` + + Credentials map[string]map[string]interface{} `hcl:"credentials"` + CredentialsHelpers map[string]*ConfigCredentialsHelper `hcl:"credentials_helper"` +} + +// ConfigCredentialsHelper is the structure of the "credentials_helper" +// nested block within the CLI configuration. +type ConfigCredentialsHelper struct { + Args []string `hcl:"args"` } // BuiltinConfig is the built-in defaults for the configuration. These @@ -45,26 +64,65 @@ func ConfigDir() (string, error) { return configDir() } -// LoadConfig loads the CLI configuration from ".terraformrc" files. -func LoadConfig(path string) (*Config, error) { +// LoadConfig reads the CLI configuration from the various filesystem locations +// and from the environment, returning a merged configuration along with any +// diagnostics (errors and warnings) encountered along the way. +func LoadConfig() (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + configVal := BuiltinConfig // copy + config := &configVal + + if mainFilename, err := cliConfigFile(); err == nil { + if _, err := os.Stat(mainFilename); err == nil { + mainConfig, mainDiags := loadConfigFile(mainFilename) + diags = diags.Append(mainDiags) + config = config.Merge(mainConfig) + } + } + + if configDir, err := ConfigDir(); err == nil { + if info, err := os.Stat(configDir); err == nil && info.IsDir() { + dirConfig, dirDiags := loadConfigDir(configDir) + diags = diags.Append(dirDiags) + config = config.Merge(dirConfig) + } + } + + if envConfig := EnvConfig(); envConfig != nil { + // envConfig takes precedence + config = envConfig.Merge(config) + } + + diags = diags.Append(config.Validate()) + + return config, diags +} + +// loadConfigFile loads the CLI configuration from ".terraformrc" files. +func loadConfigFile(path string) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := &Config{} + + log.Printf("Loading CLI configuration from %s", path) + // Read the HCL file and prepare for parsing d, err := ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf( - "Error reading %s: %s", path, err) + diags = diags.Append(fmt.Errorf("Error reading %s: %s", path, err)) + return result, diags } // Parse it obj, err := hcl.Parse(string(d)) if err != nil { - return nil, fmt.Errorf( - "Error parsing %s: %s", path, err) + diags = diags.Append(fmt.Errorf("Error parsing %s: %s", path, err)) + return result, diags } // Build up the result - var result Config if err := hcl.DecodeObject(&result, obj); err != nil { - return nil, err + diags = diags.Append(fmt.Errorf("Error parsing %s: %s", path, err)) + return result, diags } // Replace all env vars @@ -75,7 +133,95 @@ func LoadConfig(path string) (*Config, error) { result.Provisioners[k] = os.ExpandEnv(v) } - return &result, nil + if result.PluginCacheDir != "" { + result.PluginCacheDir = os.ExpandEnv(result.PluginCacheDir) + } + + return result, diags +} + +func loadConfigDir(path string) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := &Config{} + + entries, err := ioutil.ReadDir(path) + if err != nil { + diags = diags.Append(fmt.Errorf("Error reading %s: %s", path, err)) + return result, diags + } + + for _, entry := range entries { + name := entry.Name() + // Ignoring errors here because it is used only to indicate pattern + // syntax errors, and our patterns are hard-coded here. 
+ hclMatched, _ := filepath.Match("*.tfrc", name) + jsonMatched, _ := filepath.Match("*.tfrc.json", name) + if !(hclMatched || jsonMatched) { + continue + } + + filePath := filepath.Join(path, name) + fileConfig, fileDiags := loadConfigFile(filePath) + diags = diags.Append(fileDiags) + result = result.Merge(fileConfig) + } + + return result, diags +} + +// EnvConfig returns a Config populated from environment variables. +// +// Any values specified in this config should override those set in the +// configuration file. +func EnvConfig() *Config { + config := &Config{} + + if envPluginCacheDir := os.Getenv(pluginCacheDirEnvVar); envPluginCacheDir != "" { + // No Expandenv here, because expanding environment variables inside + // an environment variable would be strange and seems unnecessary. + // (User can expand variables into the value while setting it using + // standard shell features.) + config.PluginCacheDir = envPluginCacheDir + } + + return config +} + +// Validate checks for errors in the configuration that cannot be detected +// just by HCL decoding, returning any problems as diagnostics. +// +// On success, the returned diagnostics will return false from the HasErrors +// method. A non-nil diagnostics is not necessarily an error, since it may +// contain just warnings. +func (c *Config) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if c == nil { + return diags + } + + // FIXME: Right now our config parsing doesn't retain enough information + // to give proper source references to any errors. We should improve + // on this when we change the CLI config parser to use HCL2. + + // Check that all "credentials" blocks have valid hostnames. + for givenHost := range c.Credentials { + _, err := svchost.ForComparison(givenHost) + if err != nil { + diags = diags.Append( + fmt.Errorf("The credentials %q block has an invalid hostname: %s", givenHost, err), + ) + } + } + + // Should have zero or one "credentials_helper" blocks + if len(c.CredentialsHelpers) > 1 { + diags = diags.Append( + fmt.Errorf("No more than one credentials_helper block may be specified"), + ) + } + + return diags } // Merge merges two configurations and returns a third entirely @@ -105,5 +251,33 @@ func (c1 *Config) Merge(c2 *Config) *Config { result.DisableCheckpoint = c1.DisableCheckpoint || c2.DisableCheckpoint result.DisableCheckpointSignature = c1.DisableCheckpointSignature || c2.DisableCheckpointSignature + result.PluginCacheDir = c1.PluginCacheDir + if result.PluginCacheDir == "" { + result.PluginCacheDir = c2.PluginCacheDir + } + + if (len(c1.Credentials) + len(c2.Credentials)) > 0 { + result.Credentials = make(map[string]map[string]interface{}) + for host, creds := range c1.Credentials { + result.Credentials[host] = creds + } + for host, creds := range c2.Credentials { + // We just clobber an entry from the other file right now. Will + // improve on this later using the more-robust merging behavior + // built in to HCL2. 
+ result.Credentials[host] = creds + } + } + + if (len(c1.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 { + result.CredentialsHelpers = make(map[string]*ConfigCredentialsHelper) + for name, helper := range c1.CredentialsHelpers { + result.CredentialsHelpers[name] = helper + } + for name, helper := range c2.CredentialsHelpers { + result.CredentialsHelpers[name] = helper + } + } + return &result } diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go index 5f4e89eef7a..9d80c42b7f3 100644 --- a/vendor/github.com/hashicorp/terraform/config/append.go +++ b/vendor/github.com/hashicorp/terraform/config/append.go @@ -82,5 +82,11 @@ func Append(c1, c2 *Config) (*Config, error) { c.Variables = append(c.Variables, c2.Variables...) } + if len(c1.Locals) > 0 || len(c2.Locals) > 0 { + c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) + c.Locals = append(c.Locals, c1.Locals...) + c.Locals = append(c.Locals, c2.Locals...) + } + return c, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go index 3f756dcf4d8..dec8e3bb23b 100644 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -9,7 +9,6 @@ import ( "strings" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/helper/hilmapstructure" "github.com/hashicorp/terraform/plugin/discovery" @@ -34,6 +33,7 @@ type Config struct { ProviderConfigs []*ProviderConfig Resources []*Resource Variables []*Variable + Locals []*Local Outputs []*Output // The fields below can be filled in by loaders for validation @@ -147,7 +147,7 @@ func (p *Provisioner) Copy() *Provisioner { } } -// Variable is a variable defined within the configuration. +// Variable is a module argument defined within the configuration. type Variable struct { Name string DeclaredType string `mapstructure:"type"` @@ -155,6 +155,12 @@ type Variable struct { Description string } +// Local is a local value defined within the configuration. +type Local struct { + Name string + RawConfig *RawConfig +} + // Output is an output defined within the configuration. An output is // resulting data that is highlighted by Terraform when finished. An // output marked Sensitive will be output in a masked form following @@ -550,6 +556,7 @@ func (c *Config) Validate() error { case *ResourceVariable: case *TerraformVariable: case *UserVariable: + case *LocalVariable: default: errs = append(errs, fmt.Errorf( @@ -558,22 +565,7 @@ func (c *Config) Validate() error { } } - // Interpolate with a fixed number to verify that its a number. - r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { - // Execute the node but transform the AST so that it returns - // a fixed value of "5" for all interpolations. 
- result, err := hil.Eval( - hil.FixedValueTransform( - root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), - nil) - if err != nil { - return "", err - } - - return result.Value, nil - }) - _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) - if err != nil { + if !r.RawCount.couldBeInteger() { errs = append(errs, fmt.Errorf( "%s: resource count must be an integer", n)) @@ -680,6 +672,29 @@ func (c *Config) Validate() error { } } + // Check that all locals are valid + { + found := make(map[string]struct{}) + for _, l := range c.Locals { + if _, ok := found[l.Name]; ok { + errs = append(errs, fmt.Errorf( + "%s: duplicate local. local value names must be unique", + l.Name, + )) + continue + } + found[l.Name] = struct{}{} + + for _, v := range l.RawConfig.Variables { + if _, ok := v.(*CountVariable); ok { + errs = append(errs, fmt.Errorf( + "local %s: count variables are only valid within resources", l.Name, + )) + } + } + } + } + // Check that all outputs are valid { found := make(map[string]struct{}) diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go index 0b3abbcd53a..a6933c2a5f4 100644 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ b/vendor/github.com/hashicorp/terraform/config/config_string.go @@ -143,6 +143,46 @@ func outputsStr(os []*Output) string { result += fmt.Sprintf(" %s: %s\n", kind, str) } } + + if o.Description != "" { + result += fmt.Sprintf(" description\n %s\n", o.Description) + } + } + + return strings.TrimSpace(result) +} + +func localsStr(ls []*Local) string { + ns := make([]string, 0, len(ls)) + m := make(map[string]*Local) + for _, l := range ls { + ns = append(ns, l.Name) + m[l.Name] = l + } + sort.Strings(ns) + + result := "" + for _, n := range ns { + l := m[n] + + result += fmt.Sprintf("%s\n", n) + + if len(l.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range l.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } } return strings.TrimSpace(result) diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go new file mode 100644 index 00000000000..2b1b0cacbfe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go @@ -0,0 +1,97 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. +// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null or unknown values if +// any of the block attributes are defined as optional and/or computed +// respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + switch { + case attrS.Computed && attrS.Optional: + // In this special case we use an unknown value as a default + // to get the intended behavior that the result is computed + // unless it has been explicitly set in config. 
+ ret[name] = &hcldec.DefaultSpec{ + Primary: &hcldec.AttrSpec{ + Name: name, + Type: attrS.Type, + }, + Default: &hcldec.LiteralSpec{ + Value: cty.UnknownVal(attrS.Type), + }, + } + case attrS.Computed: + ret[name] = &hcldec.LiteralSpec{ + Value: cty.UnknownVal(attrS.Type), + } + default: + ret[name] = &hcldec.AttrSpec{ + Name: name, + Type: attrS.Type, + Required: attrS.Required, + } + } + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. + continue + } + + childSpec := blockS.Block.DecoderSpec() + + switch blockS.Nesting { + case NestingSingle: + ret[name] = &hcldec.BlockSpec{ + TypeName: name, + Nested: childSpec, + Required: blockS.MinItems == 1 && blockS.MaxItems >= 1, + } + case NestingList: + ret[name] = &hcldec.BlockListSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingSet: + ret[name] = &hcldec.BlockSetSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingMap: + ret[name] = &hcldec.BlockMapSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. + continue + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go new file mode 100644 index 00000000000..caf8d730c1e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go @@ -0,0 +1,14 @@ +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. +package configschema diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go new file mode 100644 index 00000000000..67324ebce11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go @@ -0,0 +1,21 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Block objects should be +// tested using the InternalValidate method to detect any inconsistencies +// that would cause this method to fall back on defaults and assumptions. 
+func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + return hcldec.ImpliedType(b.DecoderSpec()) +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go new file mode 100644 index 00000000000..33cbe884f1f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go @@ -0,0 +1,92 @@ +package configschema + +import ( + "fmt" + "regexp" + + "github.com/zclconf/go-cty/cty" + + multierror "github.com/hashicorp/go-multierror" +) + +var validName = regexp.MustCompile(`^[a-z0-9_]+$`) + +// InternalValidate returns an error if the receiving block and its child +// schema definitions have any inconsistencies with the documented rules for +// valid schema. +// +// This is intended to be used within unit tests to detect when a given +// schema is invalid. +func (b *Block) InternalValidate() error { + if b == nil { + return fmt.Errorf("top-level block schema is nil") + } + return b.internalValidate("", nil) + +} + +func (b *Block) internalValidate(prefix string, err error) error { + for name, attrS := range b.Attributes { + if attrS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + continue + } + if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { + err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) + } + if attrS.Optional && attrS.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) + } + if attrS.Computed && attrS.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) + } + if attrS.Type == cty.NilType { + err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) + } + } + + for name, blockS := range b.BlockTypes { + if blockS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) + continue + } + + if _, isAttr := b.Attributes[name]; isAttr { + err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) + } else if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + + if blockS.MinItems < 0 || blockS.MaxItems < 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) + } + + switch blockS.Nesting { + case NestingSingle: + switch { + case blockS.MinItems != blockS.MaxItems: + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) + case blockS.MinItems < 0 || blockS.MinItems > 1: + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) + } + case NestingList, NestingSet: + if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) + } + case NestingMap: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) + } + default: + err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) + } + + subPrefix := prefix + name + "." + err = blockS.Block.internalValidate(subPrefix, err) + } + + return err +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go new file mode 100644 index 00000000000..d9253a20f64 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "fmt" + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return fmt.Sprintf("NestingMode(%d)", i) + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go new file mode 100644 index 00000000000..9a8ee550aa1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go @@ -0,0 +1,107 @@ +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. It may be more a matter of convention in other syntaxes, such as +// JSON. +// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks. +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. + Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + Type cty.Type + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool +} + +// NestedBlock represents the embedding of one block within another.
+type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +//go:generate stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go new file mode 100644 index 00000000000..207d105986b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go @@ -0,0 +1,134 @@ +package config + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/config/hcl2shim" + + hcl2 "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// --------------------------------------------------------------------------- +// This file contains some helper functions that are used to shim between +// HCL2 concepts and HCL/HIL concepts, to help us mostly preserve the existing +// public API that was built around HCL/HIL-oriented approaches. +// --------------------------------------------------------------------------- + +func hcl2InterpolationFuncs() map[string]function.Function { + hcl2Funcs := map[string]function.Function{} + + for name, hilFunc := range Funcs() { + hcl2Funcs[name] = hcl2InterpolationFuncShim(hilFunc) + } + + // Some functions in the old world are dealt with inside langEvalConfig + // due to their legacy reliance on direct access to the symbol table. + // Since 0.7 they don't actually need it anymore and just ignore it, + // so we're cheating a bit here and exploiting that detail by passing nil. 
+ hcl2Funcs["lookup"] = hcl2InterpolationFuncShim(interpolationFuncLookup(nil)) + hcl2Funcs["keys"] = hcl2InterpolationFuncShim(interpolationFuncKeys(nil)) + hcl2Funcs["values"] = hcl2InterpolationFuncShim(interpolationFuncValues(nil)) + + // As a bonus, we'll provide the JSON-handling functions from the cty + // function library since its "jsonencode" is more complete (doesn't force + // weird type conversions) and HIL's type system can't represent + // "jsondecode" at all. The result of jsondecode will eventually be forced + // to conform to the HIL type system on exit into the rest of Terraform due + // to our shimming right now, but it should be usable for decoding _within_ + // an expression. + hcl2Funcs["jsonencode"] = stdlib.JSONEncodeFunc + hcl2Funcs["jsondecode"] = stdlib.JSONDecodeFunc + + return hcl2Funcs +} + +func hcl2InterpolationFuncShim(hilFunc ast.Function) function.Function { + spec := &function.Spec{} + + for i, hilArgType := range hilFunc.ArgTypes { + spec.Params = append(spec.Params, function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilArgType), + Name: fmt.Sprintf("arg%d", i+1), // HIL args don't have names, so we'll fudge it + }) + } + + if hilFunc.Variadic { + spec.VarParam = &function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilFunc.VariadicType), + Name: "varargs", // HIL args don't have names, so we'll fudge it + } + } + + spec.Type = func(args []cty.Value) (cty.Type, error) { + return hcl2shim.HCL2TypeForHILType(hilFunc.ReturnType), nil + } + spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) { + hilArgs := make([]interface{}, len(args)) + for i, arg := range args { + hilV := hcl2shim.HILVariableFromHCL2Value(arg) + + // Although the cty function system does automatic type conversions + // to match the argument types, cty doesn't distinguish int and + // float and so we may need to adjust here to ensure that the + // wrapped function gets exactly the Go type it was expecting. + var wantType ast.Type + if i < len(hilFunc.ArgTypes) { + wantType = hilFunc.ArgTypes[i] + } else { + wantType = hilFunc.VariadicType + } + switch { + case hilV.Type == ast.TypeInt && wantType == ast.TypeFloat: + hilV.Type = wantType + hilV.Value = float64(hilV.Value.(int)) + case hilV.Type == ast.TypeFloat && wantType == ast.TypeInt: + hilV.Type = wantType + hilV.Value = int(hilV.Value.(float64)) + } + + // HIL functions actually expect to have the outermost variable + // "peeled" but any nested values (in lists or maps) will + // still have their ast.Variable wrapping. + hilArgs[i] = hilV.Value + } + + hilResult, err := hilFunc.Callback(hilArgs) + if err != nil { + return cty.DynamicVal, err + } + + // Just as on the way in, we get back a partially-peeled ast.Variable + // which we need to re-wrap in order to convert it back into what + // we're calling a "config value". 
+ rv := hcl2shim.HCL2ValueFromHILVariable(ast.Variable{ + Type: hilFunc.ReturnType, + Value: hilResult, + }) + + return convert.Convert(rv, retType) // if result is unknown we'll force the correct type here + } + return function.New(spec) +} + +func hcl2EvalWithUnknownVars(expr hcl2.Expression) (cty.Value, hcl2.Diagnostics) { + trs := expr.Variables() + vars := map[string]cty.Value{} + val := cty.DynamicVal + + for _, tr := range trs { + name := tr.RootName() + vars[name] = val + } + + ctx := &hcl2.EvalContext{ + Variables: vars, + Functions: hcl2InterpolationFuncs(), + } + return expr.Value(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go new file mode 100644 index 00000000000..19651c81cf2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl2/hcl" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go new file mode 100644 index 00000000000..0b697a5f5ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go @@ -0,0 +1,246 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/hil/ast" + "github.com/zclconf/go-cty/cty" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
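+ // Editor's note (illustrative): under these rules, + // ConfigValueFromHCL2(cty.NumberIntVal(5)) yields int(5), while + // ConfigValueFromHCL2(cty.NumberFloatVal(1.5)) yields float64(1.5); + // the inverse mapping is HCL2ValueFromConfigValue further below. 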
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + l[ek.AsString()] = ConfigValueFromHCL2(ev) + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} + +func HILVariableFromHCL2Value(v cty.Value) ast.Variable { + if v.IsNull() { + // Caller should guarantee/check this before calling + panic("Null values cannot be represented in HIL") + } + if !v.IsKnown() { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: UnknownVariableValue, + } + } + + switch v.Type() { + case cty.Bool: + return ast.Variable{ + Type: ast.TypeBool, + Value: v.True(), + } + case cty.Number: + v := ConfigValueFromHCL2(v) + switch tv := v.(type) { + case int: + return ast.Variable{ + Type: ast.TypeInt, + Value: tv, + } + case float64: + return ast.Variable{ + Type: ast.TypeFloat, + Value: tv, + } + default: + // should never happen + panic("invalid return value for configValueFromHCL2") + } + case cty.String: + return ast.Variable{ + Type: ast.TypeString, + Value: v.AsString(), + } + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]ast.Variable, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, HILVariableFromHCL2Value(ev)) + } + // If we were given a tuple then this could actually produce an invalid + // list with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous list would be. 
+ return ast.Variable{ + Type: ast.TypeList, + Value: l, + } + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]ast.Variable) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + l[ek.AsString()] = HILVariableFromHCL2Value(ev) + } + // If we were given an object then this could actually produce an invalid + // map with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous map would be. + return ast.Variable{ + Type: ast.TypeMap, + Value: l, + } + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to HIL variable", v)) +} + +func HCL2ValueFromHILVariable(v ast.Variable) cty.Value { + switch v.Type { + case ast.TypeList: + vals := make([]cty.Value, len(v.Value.([]ast.Variable))) + for i, ev := range v.Value.([]ast.Variable) { + vals[i] = HCL2ValueFromHILVariable(ev) + } + return cty.TupleVal(vals) + case ast.TypeMap: + vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable))) + for k, ev := range v.Value.(map[string]ast.Variable) { + vals[k] = HCL2ValueFromHILVariable(ev) + } + return cty.ObjectVal(vals) + default: + return HCL2ValueFromConfigValue(v.Value) + } +} + +func HCL2TypeForHILType(hilType ast.Type) cty.Type { + switch hilType { + case ast.TypeAny: + return cty.DynamicPseudoType + case ast.TypeUnknown: + return cty.DynamicPseudoType + case ast.TypeBool: + return cty.Bool + case ast.TypeInt: + return cty.Number + case ast.TypeFloat: + return cty.Number + case ast.TypeString: + return cty.String + case ast.TypeList: + return cty.List(cty.DynamicPseudoType) + case ast.TypeMap: + return cty.Map(cty.DynamicPseudoType) + default: + return cty.NilType // equivalent to ast.TypeInvalid + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go index 37ec11a155a..08cbc773621 100644 --- a/vendor/github.com/hashicorp/terraform/config/import_tree.go +++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go @@ -1,8 +1,12 @@ package config import ( + "bufio" "fmt" "io" + "os" + + "github.com/hashicorp/errwrap" ) // configurable is an interface that must be implemented by any configuration @@ -27,15 +31,52 @@ type importTree struct { // imports. type fileLoaderFunc func(path string) (configurable, []string, error) +// Set this to a non-empty value at link time to enable the HCL2 experiment. +// This is not currently enabled for release builds. +// +// For example: +// go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform +var enableHCL2Experiment = "" + // loadTree takes a single file and loads the entire importTree for that // file. This function detects what kind of configuration file it is and executes the proper fileLoaderFunc. func loadTree(root string) (*importTree, error) { var f fileLoaderFunc + + // HCL2 experiment is currently activated at build time via the linker. + // See the comment on this variable for more information. 
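+ // Editor's note (illustrative): with the experiment linked in, a .tf file + // opts in to the HCL2 parser by containing a line that is exactly the + // marker comment sniffed for below, e.g. at the top of a file: + // + // #terraform:hcl2 + // + // Files without the marker continue to use the original HCL loader. 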
+ if enableHCL2Experiment == "" { + // Main-line behavior: always use the original HCL parser + switch ext(root) { + case ".tf", ".tf.json": + f = loadFileHcl + default: + } + } else { + // Experimental behavior: use the HCL2 parser if the opt-in comment + // is present. + switch ext(root) { + case ".tf": + // We need to sniff the file for the opt-in comment line to decide + // if the file is participating in the HCL2 experiment. + cf, err := os.Open(root) + if err != nil { + return nil, err + } + sc := bufio.NewScanner(cf) + for sc.Scan() { + if sc.Text() == "#terraform:hcl2" { + f = globalHCL2Loader.loadFile + } + } + if f == nil { + f = loadFileHcl + } + case ".tf.json": + f = loadFileHcl + default: + } } if f == nil { @@ -86,10 +127,7 @@ func (t *importTree) Close() error { func (t *importTree) ConfigTree() (*configTree, error) { config, err := t.Raw.Config() if err != nil { - return nil, fmt.Errorf( - "Error loading %s: %s", - t.Path, - err) + return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err) } // Build our result diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go index bbb35554185..599e5ecdb35 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go @@ -5,6 +5,8 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/hil/ast" ) @@ -14,6 +16,21 @@ import ( // variables can come from: user variables, resources, etc. type InterpolatedVariable interface { FullKey() string + SourceRange() tfdiags.SourceRange +} + +// varRange can be embedded into an InterpolatedVariable implementation to +// implement the SourceRange method. +type varRange struct { + rng tfdiags.SourceRange +} + +func (r varRange) SourceRange() tfdiags.SourceRange { + return r.rng +} + +func makeVarRange(rng tfdiags.SourceRange) varRange { + return varRange{rng} } // CountVariable is a variable for referencing information about @@ -21,6 +38,7 @@ type InterpolatedVariable interface { type CountVariable struct { Type CountValueType key string + varRange } // CountValueType is the type of the count variable that is referenced. @@ -37,6 +55,7 @@ type ModuleVariable struct { Name string Field string key string + varRange } // A PathVariable is a variable that references path information about the @@ -44,6 +63,7 @@ type ModuleVariable struct { type PathVariable struct { Type PathValueType key string + varRange } type PathValueType byte @@ -67,6 +87,7 @@ type ResourceVariable struct { Index int // Index for multi-variable: aws_instance.foo.1.id == 1 key string + varRange } // SelfVariable is a variable that is referencing the same resource @@ -75,6 +96,7 @@ type SelfVariable struct { Field string key string + varRange } // SimpleVariable is an unprefixed variable, which can show up when users have @@ -82,6 +104,7 @@ type SelfVariable struct { // internally. The template_file resource is an example of this. 
type SimpleVariable struct { Key string + varRange } // TerraformVariable is a "terraform."-prefixed variable used to access @@ -89,6 +112,7 @@ type SimpleVariable struct { type TerraformVariable struct { Field string key string + varRange } // A UserVariable is a variable that is referencing a user variable @@ -99,6 +123,14 @@ type UserVariable struct { Elem string key string + varRange +} + +// A LocalVariable is a variable that references a local value defined within +// the current module, via a "locals" block. This looks like "${local.foo}". +type LocalVariable struct { + Name string + varRange } func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { @@ -112,6 +144,8 @@ func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { return NewTerraformVariable(v) } else if strings.HasPrefix(v, "var.") { return NewUserVariable(v) + } else if strings.HasPrefix(v, "local.") { + return NewLocalVariable(v) } else if strings.HasPrefix(v, "module.") { return NewModuleVariable(v) } else if !strings.ContainsRune(v, '.') { @@ -276,7 +310,7 @@ func (v *SelfVariable) GoString() string { } func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{key}, nil + return &SimpleVariable{Key: key}, nil } func (v *SimpleVariable) FullKey() string { @@ -331,6 +365,25 @@ func (v *UserVariable) GoString() string { return fmt.Sprintf("*%#v", *v) } +func NewLocalVariable(key string) (*LocalVariable, error) { + name := key[len("local."):] + if idx := strings.Index(name, "."); idx > -1 { + return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name) + } + + return &LocalVariable{ + Name: name, + }, nil +} + +func (v *LocalVariable) FullKey() string { + return fmt.Sprintf("local.%s", v.Name) +} + +func (v *LocalVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + // DetectVariables takes an AST root and returns all the interpolated // variables that are detected in the AST tree. func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index a298cf2d3dc..94894ffee47 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -1,6 +1,8 @@ package config import ( + "bytes" + "compress/gzip" "crypto/md5" "crypto/sha1" "crypto/sha256" @@ -12,6 +14,7 @@ import ( "io/ioutil" "math" "net" + "net/url" "path/filepath" "regexp" "sort" @@ -55,9 +58,11 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { // Funcs is the mapping of built-in functions for configuration. 
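// // Editor's note (illustrative examples, not part of this change): the // functions added below are used from interpolation syntax, for example: // // ${indent(2, "a\nb")} indents the continuation lines by two spaces // ${chunklist(list("a", "b", "c"), 2)} yields [["a", "b"], ["c"]] // ${urlencode("a b")} yields "a+b" // ${base64gzip(file("init.sh"))} gzips, then base64-encodes, a file // ${abs(-3.5)} yields 3.5 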
func Funcs() map[string]ast.Function { return map[string]ast.Function{ + "abs": interpolationFuncAbs(), "basename": interpolationFuncBasename(), "base64decode": interpolationFuncBase64Decode(), "base64encode": interpolationFuncBase64Encode(), + "base64gzip": interpolationFuncBase64Gzip(), "base64sha256": interpolationFuncBase64Sha256(), "base64sha512": interpolationFuncBase64Sha512(), "bcrypt": interpolationFuncBcrypt(), @@ -74,11 +79,14 @@ func Funcs() map[string]ast.Function { "dirname": interpolationFuncDirname(), "distinct": interpolationFuncDistinct(), "element": interpolationFuncElement(), + "chunklist": interpolationFuncChunklist(), "file": interpolationFuncFile(), "matchkeys": interpolationFuncMatchKeys(), + "flatten": interpolationFuncFlatten(), "floor": interpolationFuncFloor(), "format": interpolationFuncFormat(), "formatlist": interpolationFuncFormatList(), + "indent": interpolationFuncIndent(), "index": interpolationFuncIndex(), "join": interpolationFuncJoin(), "jsonencode": interpolationFuncJSONEncode(), @@ -105,8 +113,10 @@ func Funcs() map[string]ast.Function { "substr": interpolationFuncSubstr(), "timestamp": interpolationFuncTimestamp(), "title": interpolationFuncTitle(), + "transpose": interpolationFuncTranspose(), "trimspace": interpolationFuncTrimSpace(), "upper": interpolationFuncUpper(), + "urlencode": interpolationFuncURLEncode(), "zipmap": interpolationFuncZipMap(), } } @@ -669,6 +679,21 @@ func interpolationFuncFormatList() ast.Function { } } +// interpolationFuncIndent indents a multi-line string with the +// specified number of spaces +func interpolationFuncIndent() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + spaces := args[0].(int) + data := args[1].(string) + pad := strings.Repeat(" ", spaces) + return strings.Replace(data, "\n", "\n"+pad, -1), nil + }, + } +} + // interpolationFuncIndex implements the "index" function that allows one to // find the index of a specific element in a list func interpolationFuncIndex() ast.Function { @@ -823,8 +848,7 @@ func interpolationFuncJoin() ast.Function { } // interpolationFuncJSONEncode implements the "jsonencode" function that encodes -// a string, list, or map as its JSON representation. For now, values in the -// list or map may only be strings. +// a string, list, or map as its JSON representation. func interpolationFuncJSONEncode() ast.Function { return ast.Function{ ArgTypes: []ast.Type{ast.TypeAny}, @@ -837,28 +861,36 @@ func interpolationFuncJSONEncode() ast.Function { toEncode = typedArg case []ast.Variable: - // We preallocate the list here. Note that it's important that in - // the length 0 case, we have an empty list rather than nil, as - // they encode differently. - // XXX It would be nice to support arbitrarily nested data here. Is - // there an inverse of hil.InterfaceToVariable? strings := make([]string, len(typedArg)) for i, v := range typedArg { if v.Type != ast.TypeString { - return "", fmt.Errorf("list elements must be strings") + variable, _ := hil.InterfaceToVariable(typedArg) + toEncode, _ = hil.VariableToInterface(variable) + + jEnc, err := json.Marshal(toEncode) + if err != nil { + return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode) + } + return string(jEnc), nil + } strings[i] = v.Value.(string) } toEncode = strings case map[string]ast.Variable: - // XXX It would be nice to support arbitrarily nested data here. 
Is - // there an inverse of hil.InterfaceToVariable? stringMap := make(map[string]string) for k, v := range typedArg { if v.Type != ast.TypeString { - return "", fmt.Errorf("map values must be strings") + variable, _ := hil.InterfaceToVariable(typedArg) + toEncode, _ = hil.VariableToInterface(variable) + + jEnc, err := json.Marshal(toEncode) + if err != nil { + return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode) + } + return string(jEnc), nil } stringMap[k] = v.Value.(string) } toEncode = stringMap @@ -1098,6 +1130,56 @@ func interpolationFuncElement() ast.Function { } } +// returns the `list` items chunked by `size`. +func interpolationFuncChunklist() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeList, // inputList + ast.TypeInt, // size + }, + ReturnType: ast.TypeList, + Callback: func(args []interface{}) (interface{}, error) { + output := make([]ast.Variable, 0) + + values, _ := args[0].([]ast.Variable) + size, _ := args[1].(int) + + // errors if size is negative + if size < 0 { + return nil, fmt.Errorf("The size argument must not be negative") + } + + // if size is 0, returns a list made of the initial list + if size == 0 { + output = append(output, ast.Variable{ + Type: ast.TypeList, + Value: values, + }) + return output, nil + } + + variables := make([]ast.Variable, 0) + chunk := ast.Variable{ + Type: ast.TypeList, + Value: variables, + } + l := len(values) + for i, v := range values { + variables = append(variables, v) + + // Close the current chunk when it is full, or when we reach the end of the values + if (i+1)%size == 0 || (i+1) == l { + chunk.Value = variables + output = append(output, chunk) + variables = make([]ast.Variable, 0) + } + } + + return output, nil + }, + } +} + // interpolationFuncKeys implements the "keys" function that yields a list of // keys of map types within a Terraform configuration. func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function { @@ -1197,6 +1279,32 @@ func interpolationFuncBase64Decode() ast.Function { } } +// interpolationFuncBase64Gzip implements the "base64gzip" function that +// gzip-compresses the given string and encodes the result using base64 +func interpolationFuncBase64Gzip() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(s)); err != nil { + return "", fmt.Errorf("failed to write gzip raw data: '%s'", s) + } + if err := gz.Flush(); err != nil { + return "", fmt.Errorf("failed to flush gzip writer: '%s'", s) + } + if err := gz.Close(); err != nil { + return "", fmt.Errorf("failed to close gzip writer: '%s'", s) + } + + return base64.StdEncoding.EncodeToString(b.Bytes()), nil + }, + } +} + // interpolationFuncLower implements the "lower" function that does // string lower casing. 
func interpolationFuncLower() ast.Function { @@ -1453,3 +1561,99 @@ func interpolationFuncSubstr() ast.Function { }, } } + +// Flatten until it's not ast.TypeList +func flattener(finalList []ast.Variable, flattenList []ast.Variable) []ast.Variable { + for _, val := range flattenList { + if val.Type == ast.TypeList { + finalList = flattener(finalList, val.Value.([]ast.Variable)) + } else { + finalList = append(finalList, val) + } + } + return finalList +} + +// Flatten to single list +func interpolationFuncFlatten() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + var outputList []ast.Variable + return flattener(outputList, inputList), nil + }, + } +} + +func interpolationFuncURLEncode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + return url.QueryEscape(s), nil + }, + } +} + +// interpolationFuncTranspose implements the "transpose" function +// that converts a map (string,list) to a map (string,list) where +// the unique values of the original lists become the keys of the +// new map and the keys of the original map become values for the +// corresponding new keys. +func interpolationFuncTranspose() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + + inputMap := args[0].(map[string]ast.Variable) + outputMap := make(map[string]ast.Variable) + tmpMap := make(map[string][]string) + + for inKey, inVal := range inputMap { + if inVal.Type != ast.TypeList { + return nil, fmt.Errorf("transpose requires a map of lists of strings") + } + values := inVal.Value.([]ast.Variable) + for _, listVal := range values { + if listVal.Type != ast.TypeString { + return nil, fmt.Errorf("transpose requires the given map values to be lists of strings") + } + outKey := listVal.Value.(string) + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]ast.Variable, 0) + for _, v := range outVal { + values = append(values, ast.Variable{Type: ast.TypeString, Value: v}) + } + outputMap[outKey] = ast.Variable{Type: ast.TypeList, Value: values} + } + return outputMap, nil + }, + } +} + +// interpolationFuncAbs returns the absolute value of a given float. +func interpolationFuncAbs() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + return math.Abs(args[0].(float64)), nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go index ead3d102e18..66a677d5d93 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go @@ -271,9 +271,7 @@ func (w *interpolationWalker) splitSlice() { result = append(result, val.Value) } case []interface{}: - for _, element := range val { - result = append(result, element) - } + result = append(result, val...) 
default: result = append(result, v) } } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go index 5dd7d468981..6e3478167f4 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ b/vendor/github.com/hashicorp/terraform/config/loader.go @@ -80,7 +80,7 @@ func LoadDir(root string) (*Config, error) { if err != nil { return nil, err } - if len(files) == 0 { + if len(files) == 0 && len(overrides) == 0 { return nil, &ErrNoConfigsFound{Dir: root} } @@ -112,6 +112,9 @@ func LoadDir(root string) (*Config, error) { result = c } } + if len(files) == 0 { + result = &Config{} + } // Load all the overrides, and merge them into the config for _, f := range overrides { diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go index e85e493555a..c8c82c312f5 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go @@ -17,10 +17,20 @@ type hclConfigurable struct { Root *ast.File } +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + var ReservedResourceFields = []string{ "connection", "count", "depends_on", + "id", "lifecycle", "provider", "provisioner", @@ -35,6 +45,7 @@ func (t *hclConfigurable) Config() (*Config, error) { validKeys := map[string]struct{}{ "atlas": struct{}{}, "data": struct{}{}, + "locals": struct{}{}, "module": struct{}{}, "output": struct{}{}, "provider": struct{}{}, @@ -70,6 +81,15 @@ func (t *hclConfigurable) Config() (*Config, error) { } } + // Build local values + if locals := list.Filter("locals"); len(locals.Items) > 0 { + var err error + config.Locals, err = loadLocalsHcl(locals) + if err != nil { + return nil, err + } + } + // Get Atlas configuration if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { var err error @@ -406,6 +426,59 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) { return result, nil } +// loadLocalsHcl recurses into the given HCL object and turns it into +// a list of locals. +func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) { + + result := make([]*Local, 0, len(list.Items)) + + for _, block := range list.Items { + if len(block.Keys) > 0 { + return nil, fmt.Errorf( + "locals block at %s should not have label %q", + block.Pos(), block.Keys[0].Token.Value(), + ) + } + + blockObj, ok := block.Val.(*ast.ObjectType) + if !ok { + return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos()) + } + + // blockObj now contains directly our local decls + for _, item := range blockObj.List.Items { + if len(item.Keys) != 1 { + return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos()) + } + + // By the time we get here there can only be one item left, but + // we'll decode into a map anyway because it's a convenient way + // to extract both the key and the value robustly. + kv := map[string]interface{}{} + hcl.DecodeObject(&kv, item) + for k, v := range kv { + rawConfig, err := NewRawConfig(map[string]interface{}{ + "value": v, + }) + + if err != nil { + return nil, fmt.Errorf( + "error parsing local value %q at %s: %s", + k, item.Val.Pos(), err, + ) + } + + result = append(result, &Local{ + Name: k, + RawConfig: rawConfig, + }) + } + } + } + + return result, nil +} + // loadOutputsHcl recurses into the given HCL object and turns // it into a mapping of outputs. 
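// // Editor's note on loadLocalsHcl above (illustrative): the accepted input is // an unlabelled locals block of plain attributes, e.g.: // // locals { // service_name = "forum" // owner = "Community Team" // } // // Each attribute becomes one *Local whose RawConfig wraps its value. 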
func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { @@ -434,6 +507,7 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { // Delete special keys delete(config, "depends_on") + delete(config, "description") rawConfig, err := NewRawConfig(config) if err != nil { @@ -455,10 +529,23 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { } } + // If we have a description field, then filter that + var description string + if o := listVal.Filter("description"); len(o.Items) > 0 { + err := hcl.DecodeObject(&description, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading description for output %q: %s", + n, + err) + } + } + result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, - DependsOn: dependsOn, + Name: n, + RawConfig: rawConfig, + DependsOn: dependsOn, + Description: description, }) } diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go new file mode 100644 index 00000000000..9243bfb769f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go @@ -0,0 +1,462 @@ +package config + +import ( + "fmt" + "sort" + "strings" + + gohcl2 "github.com/hashicorp/hcl2/gohcl" + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2parse "github.com/hashicorp/hcl2/hclparse" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/zclconf/go-cty/cty" +) + +// hcl2Configurable is an implementation of configurable that knows +// how to turn a HCL Body into a *Config object. +type hcl2Configurable struct { + SourceFilename string + Body hcl2.Body +} + +// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc. +type hcl2Loader struct { + Parser *hcl2parse.Parser +} + +// For the moment we'll just have a global loader since we don't have anywhere +// better to stash this. +// TODO: refactor the loader API so that it uses some sort of object we can +// stash the parser inside. +var globalHCL2Loader = newHCL2Loader() + +// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser. +// +// HCL parsers retain information about files that are loaded to aid in +// producing diagnostic messages, so all files within a single configuration +// should be loaded with the same parser to ensure the availability of +// full diagnostic information. +func newHCL2Loader() hcl2Loader { + return hcl2Loader{ + Parser: hcl2parse.NewParser(), + } +} + +// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it +// into a hcl2Configurable. +func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) { + var f *hcl2.File + var diags hcl2.Diagnostics + if strings.HasSuffix(filename, ".json") { + f, diags = l.Parser.ParseJSONFile(filename) + } else { + f, diags = l.Parser.ParseHCLFile(filename) + } + if diags.HasErrors() { + // Return diagnostics as an error; callers may type-assert this to + // recover the original diagnostics, if it doesn't end up wrapped + // in another error. + return nil, nil, diags + } + + return &hcl2Configurable{ + SourceFilename: filename, + Body: f.Body, + }, nil, nil +} + +func (t *hcl2Configurable) Config() (*Config, error) { + config := &Config{} + + // these structs are used only for the initial shallow decoding; we'll + // expand this into the main, public-facing config structs afterwards. 
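+ // Editor's note (illustrative): gohcl2.DecodeBody fills these structs + // via their `hcl:"..."` struct tags, so for example the block + // + // module "vpc" { + // source = "./vpc" + // } + // + // decodes into module{Name: "vpc", Source: "./vpc"}, with any other + // attributes captured in the Config body by the ",remain" tag. 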
+ type atlas struct { + Name string `hcl:"name"` + Include *[]string `hcl:"include"` + Exclude *[]string `hcl:"exclude"` + } + type module struct { + Name string `hcl:"name,label"` + Source string `hcl:"source,attr"` + Config hcl2.Body `hcl:",remain"` + } + type provider struct { + Name string `hcl:"name,label"` + Alias *string `hcl:"alias,attr"` + Version *string `hcl:"version,attr"` + Config hcl2.Body `hcl:",remain"` + } + type resourceLifecycle struct { + CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"` + PreventDestroy *bool `hcl:"prevent_destroy,attr"` + IgnoreChanges *[]string `hcl:"ignore_changes,attr"` + } + type connection struct { + Config hcl2.Body `hcl:",remain"` + } + type provisioner struct { + Type string `hcl:"type,label"` + + When *string `hcl:"when,attr"` + OnFailure *string `hcl:"on_failure,attr"` + + Connection *connection `hcl:"connection,block"` + Config hcl2.Body `hcl:",remain"` + } + type managedResource struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + + CountExpr hcl2.Expression `hcl:"count,attr"` + Provider *string `hcl:"provider,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + + Lifecycle *resourceLifecycle `hcl:"lifecycle,block"` + Provisioners []provisioner `hcl:"provisioner,block"` + Connection *connection `hcl:"connection,block"` + + Config hcl2.Body `hcl:",remain"` + } + type dataResource struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + + CountExpr hcl2.Expression `hcl:"count,attr"` + Provider *string `hcl:"provider,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + + Config hcl2.Body `hcl:",remain"` + } + type variable struct { + Name string `hcl:"name,label"` + + DeclaredType *string `hcl:"type,attr"` + Default *cty.Value `hcl:"default,attr"` + Description *string `hcl:"description,attr"` + Sensitive *bool `hcl:"sensitive,attr"` + } + type output struct { + Name string `hcl:"name,label"` + + ValueExpr hcl2.Expression `hcl:"value,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + Description *string `hcl:"description,attr"` + Sensitive *bool `hcl:"sensitive,attr"` + } + type locals struct { + Definitions hcl2.Attributes `hcl:",remain"` + } + type backend struct { + Type string `hcl:"type,label"` + Config hcl2.Body `hcl:",remain"` + } + type terraform struct { + RequiredVersion *string `hcl:"required_version,attr"` + Backend *backend `hcl:"backend,block"` + } + type topLevel struct { + Atlas *atlas `hcl:"atlas,block"` + Datas []dataResource `hcl:"data,block"` + Modules []module `hcl:"module,block"` + Outputs []output `hcl:"output,block"` + Providers []provider `hcl:"provider,block"` + Resources []managedResource `hcl:"resource,block"` + Terraform *terraform `hcl:"terraform,block"` + Variables []variable `hcl:"variable,block"` + Locals []*locals `hcl:"locals,block"` + } + + var raw topLevel + diags := gohcl2.DecodeBody(t.Body, nil, &raw) + if diags.HasErrors() { + // Do some minimal decoding to see if we can at least get the + // required Terraform version, which might help explain why we + // couldn't parse the rest. + if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil { + config.Terraform = &Terraform{ + RequiredVersion: *raw.Terraform.RequiredVersion, + } + } + + // We return the diags as an implementation of error, which the + // caller can then type-assert if desired to recover the individual + // diagnostics. + // FIXME: The current API gives us no way to return warnings in the + // absence of any errors. 
+ return config, diags + } + + if raw.Terraform != nil { + var reqdVersion string + var backend *Backend + + if raw.Terraform.RequiredVersion != nil { + reqdVersion = *raw.Terraform.RequiredVersion + } + if raw.Terraform.Backend != nil { + backend = new(Backend) + backend.Type = raw.Terraform.Backend.Type + + // We don't permit interpolations or nested blocks inside the + // backend config, so we can decode the config early here and + // get direct access to the values, which is important for the + // config hashing to work as expected. + var config map[string]string + configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config) + diags = append(diags, configDiags...) + + raw := make(map[string]interface{}, len(config)) + for k, v := range config { + raw[k] = v + } + + var err error + backend.RawConfig, err = NewRawConfig(raw) + if err != nil { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid backend configuration", + Detail: fmt.Sprintf("Error in backend configuration: %s", err), + }) + } + } + + config.Terraform = &Terraform{ + RequiredVersion: reqdVersion, + Backend: backend, + } + } + + if raw.Atlas != nil { + var include, exclude []string + if raw.Atlas.Include != nil { + include = *raw.Atlas.Include + } + if raw.Atlas.Exclude != nil { + exclude = *raw.Atlas.Exclude + } + config.Atlas = &AtlasConfig{ + Name: raw.Atlas.Name, + Include: include, + Exclude: exclude, + } + } + + for _, rawM := range raw.Modules { + m := &Module{ + Name: rawM.Name, + Source: rawM.Source, + RawConfig: NewRawConfigHCL2(rawM.Config), + } + config.Modules = append(config.Modules, m) + } + + for _, rawV := range raw.Variables { + v := &Variable{ + Name: rawV.Name, + } + if rawV.DeclaredType != nil { + v.DeclaredType = *rawV.DeclaredType + } + if rawV.Default != nil { + v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default) + } + if rawV.Description != nil { + v.Description = *rawV.Description + } + + config.Variables = append(config.Variables, v) + } + + for _, rawO := range raw.Outputs { + o := &Output{ + Name: rawO.Name, + } + + if rawO.Description != nil { + o.Description = *rawO.Description + } + if rawO.DependsOn != nil { + o.DependsOn = *rawO.DependsOn + } + if rawO.Sensitive != nil { + o.Sensitive = *rawO.Sensitive + } + + // The result is expected to be a map like map[string]interface{}{"value": something}, + // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
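+ // Editor's note (illustrative): that is, an output's value expression is + // presented to the legacy RawConfig machinery as if it had been the map + // + // map[string]interface{}{"value": <result of evaluating ValueExpr>} + // 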
+ o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{ + Name: "value", + Expr: rawO.ValueExpr, + }) + + config.Outputs = append(config.Outputs, o) + } + + for _, rawR := range raw.Resources { + r := &Resource{ + Mode: ManagedResourceMode, + Type: rawR.Type, + Name: rawR.Name, + } + if rawR.Lifecycle != nil { + var l ResourceLifecycle + if rawR.Lifecycle.CreateBeforeDestroy != nil { + l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy + } + if rawR.Lifecycle.PreventDestroy != nil { + l.PreventDestroy = *rawR.Lifecycle.PreventDestroy + } + if rawR.Lifecycle.IgnoreChanges != nil { + l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges + } + r.Lifecycle = l + } + if rawR.Provider != nil { + r.Provider = *rawR.Provider + } + if rawR.DependsOn != nil { + r.DependsOn = *rawR.DependsOn + } + + var defaultConnInfo *RawConfig + if rawR.Connection != nil { + defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config) + } + + for _, rawP := range rawR.Provisioners { + p := &Provisioner{ + Type: rawP.Type, + } + + switch { + case rawP.When == nil: + p.When = ProvisionerWhenCreate + case *rawP.When == "create": + p.When = ProvisionerWhenCreate + case *rawP.When == "destroy": + p.When = ProvisionerWhenDestroy + default: + p.When = ProvisionerWhenInvalid + } + + switch { + case rawP.OnFailure == nil: + p.OnFailure = ProvisionerOnFailureFail + case *rawP.OnFailure == "fail": + p.OnFailure = ProvisionerOnFailureFail + case *rawP.OnFailure == "continue": + p.OnFailure = ProvisionerOnFailureContinue + default: + p.OnFailure = ProvisionerOnFailureInvalid + } + + if rawP.Connection != nil { + p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config) + } else { + p.ConnInfo = defaultConnInfo + } + + p.RawConfig = NewRawConfigHCL2(rawP.Config) + + r.Provisioners = append(r.Provisioners, p) + } + + // The old loader records the count expression as a weird RawConfig with + // a single-element map inside. Since the rest of the world is assuming + // that, we'll mimic it here. + { + countBody := hcl2shim.SingleAttrBody{ + Name: "count", + Expr: rawR.CountExpr, + } + + r.RawCount = NewRawConfigHCL2(countBody) + r.RawCount.Key = "count" + } + + r.RawConfig = NewRawConfigHCL2(rawR.Config) + + config.Resources = append(config.Resources, r) + + } + + for _, rawR := range raw.Datas { + r := &Resource{ + Mode: DataResourceMode, + Type: rawR.Type, + Name: rawR.Name, + } + + if rawR.Provider != nil { + r.Provider = *rawR.Provider + } + if rawR.DependsOn != nil { + r.DependsOn = *rawR.DependsOn + } + + // The old loader records the count expression as a weird RawConfig with + // a single-element map inside. Since the rest of the world is assuming + // that, we'll mimic it here. + { + countBody := hcl2shim.SingleAttrBody{ + Name: "count", + Expr: rawR.CountExpr, + } + + r.RawCount = NewRawConfigHCL2(countBody) + r.RawCount.Key = "count" + } + + r.RawConfig = NewRawConfigHCL2(rawR.Config) + + config.Resources = append(config.Resources, r) + } + + for _, rawP := range raw.Providers { + p := &ProviderConfig{ + Name: rawP.Name, + } + + if rawP.Alias != nil { + p.Alias = *rawP.Alias + } + if rawP.Version != nil { + p.Version = *rawP.Version + } + + // The provider configuration is just the remaining body of the block, + // so we can wrap it directly as the provider's raw config. 
+ p.RawConfig = NewRawConfigHCL2(rawP.Config) + + config.ProviderConfigs = append(config.ProviderConfigs, p) + } + + for _, rawL := range raw.Locals { + names := make([]string, 0, len(rawL.Definitions)) + for n := range rawL.Definitions { + names = append(names, n) + } + sort.Strings(names) + for _, n := range names { + attr := rawL.Definitions[n] + l := &Local{ + Name: n, + RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{ + Name: "value", + Expr: attr.Expr, + }), + } + config.Locals = append(config.Locals, l) + } + } + + // FIXME: The current API gives us no way to return warnings in the + // absence of any errors. + var err error + if diags.HasErrors() { + err = diags + } + + return config, err +} diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go index db214be4562..55fc864f7ae 100644 --- a/vendor/github.com/hashicorp/terraform/config/merge.go +++ b/vendor/github.com/hashicorp/terraform/config/merge.go @@ -137,6 +137,17 @@ func Merge(c1, c2 *Config) (*Config, error) { } } + // Local Values + // These are simpler than the other config elements because they are just + // flat values and so no deep merging is required. + if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 { + // Explicit length check above because we want c.Locals to remain + // nil if the result would be empty. + c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) + c.Locals = append(c.Locals, c1.Locals...) + c.Locals = append(c.Locals, c2.Locals...) + } + return c, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go index 96b4a63c3d9..c62b7d2ee0c 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/get.go +++ b/vendor/github.com/hashicorp/terraform/config/module/get.go @@ -1,10 +1,18 @@ package module import ( + "fmt" "io/ioutil" + "log" + "net/http" + "net/url" "os" + "regexp" + "strings" "github.com/hashicorp/go-getter" + + cleanhttp "github.com/hashicorp/go-cleanhttp" ) // GetMode is an enum that describes how modules are loaded. @@ -69,3 +77,131 @@ func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, // Get the directory where the module is. return s.Dir(key) } + +const ( + registryAPI = "https://registry.terraform.io/v1/modules" + xTerraformGet = "X-Terraform-Get" +) + +var detectors = []getter.Detector{ + new(getter.GitHubDetector), + new(getter.BitBucketDetector), + new(getter.S3Detector), + new(localDetector), + new(registryDetector), +} + +// these prefixes can't be registry IDs +// "http", "./", "/", "getter::" +var skipRegistry = regexp.MustCompile(`^(http|\./|/|[A-Za-z0-9]+::)`).MatchString + +// registryDetector implements getter.Detector to detect Terraform Registry modules. +// If a path looks like a registry module identifier, attempt to locate it in +// the registry. If it's not found, pass it on in case it can be found by +// other means. +type registryDetector struct { + // override the default registry URL + api string + + client *http.Client +} + +func (d registryDetector) Detect(src, _ string) (string, bool, error) { + // the namespace can't start with "http", a relative or absolute path, or + // contain a go-getter "forced getter" + if skipRegistry(src) { + return "", false, nil + } + + // there are 3 parts to a registry ID + if len(strings.Split(src, "/")) != 3 { + return "", false, nil + } + + return d.lookupModule(src) +} + +// Lookup the module in the registry. 
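+// +// Editor's note (illustrative): a registry source is exactly three +// segments, namespace/name/provider, as in +// +// module "consul" { +// source = "hashicorp/consul/aws" +// } +// +// Anything with a scheme, a path prefix, or a forced getter is skipped by +// the checks in Detect above. 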
+func (d registryDetector) lookupModule(src string) (string, bool, error) { + if d.api == "" { + d.api = registryAPI + } + + if d.client == nil { + d.client = cleanhttp.DefaultClient() + } + + // src is already partially validated in Detect. We know it's a path, and + // if it can be parsed as a URL we will hand it off to the registry to + // determine if it's truly valid. + resp, err := d.client.Get(fmt.Sprintf("%s/%s/download", d.api, src)) + if err != nil { + log.Printf("[WARN] error looking up module %q: %s", src, err) + return "", false, nil + } + defer resp.Body.Close() + + // there should be no body, but save it for logging + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Printf("[WARN] error reading response body from registry: %s", err) + return "", false, nil + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + log.Printf("[INFO] module %q not found in registry", src) + return "", false, nil + default: + // anything else is an error: + log.Printf("[WARN] error getting download location for %q: %s resp:%s", src, resp.Status, body) + return "", false, nil + } + + // the download location is in the X-Terraform-Get header + location := resp.Header.Get(xTerraformGet) + if location == "" { + return "", false, fmt.Errorf("failed to get download URL for %q: %s resp:%s", src, resp.Status, body) + } + + return location, true, nil +} + +// localDetector wraps the default getter.FileDetector and checks if the module +// exists in the local filesystem. The default FileDetector only converts paths +// into file URLs, and returns found. We want to first check for a local module +// before passing it off to the registryDetector so we don't inadvertently +// replace a local module with a registry module of the same name. +type localDetector struct{} + +func (d localDetector) Detect(src, wd string) (string, bool, error) { + localSrc, ok, err := new(getter.FileDetector).Detect(src, wd) + if err != nil { + return src, ok, err + } + + if !ok { + return "", false, nil + } + + u, err := url.Parse(localSrc) + if err != nil { + return "", false, err + } + + _, err = os.Stat(u.Path) + + // just continue detection if it doesn't exist + if os.IsNotExist(err) { + return "", false, nil + } + + // return any other errors + if err != nil { + return "", false, err + } + + return localSrc, true, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go index 4b0b153f726..262224a04ef 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/tree.go +++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go @@ -3,12 +3,16 @@ package module import ( "bufio" "bytes" + "encoding/json" "fmt" + "io/ioutil" + "log" + "os" "path/filepath" "strings" "sync" - "github.com/hashicorp/go-getter" + getter "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" ) @@ -177,23 +181,74 @@ func (t *Tree) Load(s getter.Storage, mode GetMode) error { copy(path, t.path) path = append(path, m.Name) - // Split out the subdir if we have one + // The key is the string that will be hashed to uniquely id the Source. + // The leading digit can be incremented to force re-fetch all existing + // modules. + key := fmt.Sprintf("0.root.%s-%s", strings.Join(path, "."), m.Source) + + log.Printf("[TRACE] module source %q", m.Source) + // Split out the subdir if we have one. 
+		// Split out the subdir if we have one.
+		// Terraform keeps the entire requested tree for now, so that modules can
+		// reference sibling modules from the same archive or repo.
 		source, subDir := getter.SourceDirSubdir(m.Source)
 
-		source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
+		// First check if we need to download anything.
+		// This is also checked by the getter.Storage implementation, but we
+		// want to be able to short-circuit the detection as well, since some
+		// detectors may need to make external calls.
+		dir, found, err := s.Dir(key)
+		if err != nil {
+			return err
+		}
+
+		// looks like we already have it
+		// In order to load the Tree we need to find out if there was another
+		// subDir stored from discovery.
+		if found && mode != GetModeUpdate {
+			subDir, err := t.getSubdir(dir)
+			if err != nil {
+				// If there's a problem with the subdir record, we'll let the
+				// recordSubdir method fix it up. Any other filesystem errors
+				// will turn up again below.
+				log.Println("[WARN] error reading subdir record:", err)
+			} else {
+				dir := filepath.Join(dir, subDir)
+				// Load the configurations.Dir(source)
+				children[m.Name], err = NewTreeModule(m.Name, dir)
+				if err != nil {
+					return fmt.Errorf("module %s: %s", m.Name, err)
+				}
+				// Set the path of this child
+				children[m.Name].path = path
+				continue
+			}
+		}
+
+		log.Printf("[TRACE] module source: %q", source)
+
+		source, err = getter.Detect(source, t.config.Dir, detectors)
 		if err != nil {
 			return fmt.Errorf("module %s: %s", m.Name, err)
 		}
 
+		log.Printf("[TRACE] detected module source %q", source)
+
+		// Check if the detector introduced something new.
+		// For example, the registry always adds a subdir of `//*`,
+		// indicating that we need to strip off the first component from the
+		// tar archive, though we may not yet know what it is called.
+		//
+		// TODO: This can cause us to lose the previously detected subdir. It
+		// was never an issue before, since none of the supported detectors
+		// previously had this behavior, but we may want to add this ability to
+		// registry modules.
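The `//` convention that SourceDirSubdir implements can be illustrated with a simplified stand-in; real go-getter handles more edge cases than this sketch does:

package main

import (
	"fmt"
	"strings"
)

// splitDirSubdir is a simplified stand-in for getter.SourceDirSubdir: it
// splits "addr//subdir" on the first "//" that isn't part of a URL scheme.
func splitDirSubdir(src string) (string, string) {
	// skip over a scheme like "https://" if present
	start := 0
	if idx := strings.Index(src, "://"); idx != -1 {
		start = idx + 3
	}
	if idx := strings.Index(src[start:], "//"); idx != -1 {
		return src[:start+idx], src[start+idx+2:]
	}
	return src, ""
}

func main() {
	fmt.Println(splitDirSubdir("github.com/foo/bar//modules/consul"))
	fmt.Println(splitDirSubdir("https://example.com/foo.tgz//*")) // registry-style wildcard subdir
}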
source, subDir2 := getter.SourceDirSubdir(source) if subDir2 != "" { - subDir = filepath.Join(subDir2, subDir) + subDir = subDir2 } - // Get the directory where this module is so we can load it - key := strings.Join(path, ".") - key = fmt.Sprintf("root.%s-%s", key, m.Source) + log.Printf("[TRACE] getting module source %q", source) + dir, ok, err := getStorage(s, key, source, mode) if err != nil { return err @@ -203,18 +258,31 @@ func (t *Tree) Load(s getter.Storage, mode GetMode) error { "module %s: not found, may need to be downloaded using 'terraform get'", m.Name) } - // If we have a subdirectory, then merge that in + // expand and record the subDir for later if subDir != "" { - dir = filepath.Join(dir, subDir) + fullDir, err := getter.SubdirGlob(dir, subDir) + if err != nil { + return err + } + + // +1 to account for the pathsep + if len(dir)+1 > len(fullDir) { + return fmt.Errorf("invalid module storage path %q", fullDir) + } + + subDir = fullDir[len(dir)+1:] + + if err := t.recordSubdir(dir, subDir); err != nil { + return err + } + dir = fullDir } // Load the configurations.Dir(source) children[m.Name], err = NewTreeModule(m.Name, dir) if err != nil { - return fmt.Errorf( - "module %s: %s", m.Name, err) + return fmt.Errorf("module %s: %s", m.Name, err) } - // Set the path of this child children[m.Name].path = path } @@ -232,6 +300,65 @@ func (t *Tree) Load(s getter.Storage, mode GetMode) error { return nil } +func subdirRecordsPath(dir string) string { + const filename = "module-subdir.json" + // Get the parent directory. + // The current FolderStorage implementation needed to be able to create + // this directory, so we can be reasonably certain we can use it. + parent := filepath.Dir(filepath.Clean(dir)) + return filepath.Join(parent, filename) +} + +// unmarshal the records file in the parent directory. Always returns a valid map. +func loadSubdirRecords(dir string) (map[string]string, error) { + records := map[string]string{} + + recordsPath := subdirRecordsPath(dir) + data, err := ioutil.ReadFile(recordsPath) + if err != nil && !os.IsNotExist(err) { + return records, err + } + + if len(data) == 0 { + return records, nil + } + + if err := json.Unmarshal(data, &records); err != nil { + return records, err + } + return records, nil +} + +func (t *Tree) getSubdir(dir string) (string, error) { + records, err := loadSubdirRecords(dir) + if err != nil { + return "", err + } + + return records[dir], nil +} + +// Mark the location of a detected subdir in a top-level file so we +// can skip detection when not updating the module. +func (t *Tree) recordSubdir(dir, subdir string) error { + records, err := loadSubdirRecords(dir) + if err != nil { + // if there was a problem with the file, we will attempt to write a new + // one. Any non-data related error should surface there. + log.Printf("[WARN] error reading subdir records: %s", err) + } + + records[dir] = subdir + + js, err := json.Marshal(records) + if err != nil { + return err + } + + recordsPath := subdirRecordsPath(dir) + return ioutil.WriteFile(recordsPath, js, 0644) +} + // Path is the full path to this tree. 
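The subdir bookkeeping above amounts to a small JSON sidecar mapping storage directories to their detected subdirectories. A self-contained sketch of the same read/write round trip, with invented paths:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	tmp, err := ioutil.TempDir("", "subdir-records")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	// write a records file, as recordSubdir does after expanding the glob
	recordsPath := filepath.Join(tmp, "module-subdir.json")
	records := map[string]string{
		"/storage/0.root.consul-hashicorp/consul/aws": "terraform-aws-consul-0.1.0",
	}
	js, err := json.Marshal(records)
	if err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(recordsPath, js, 0644); err != nil {
		panic(err)
	}

	// re-read it, as getSubdir does on the next load
	loaded := map[string]string{}
	data, err := ioutil.ReadFile(recordsPath)
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(data, &loaded); err != nil {
		panic(err)
	}
	fmt.Println(loaded["/storage/0.root.consul-hashicorp/consul/aws"])
}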
 func (t *Tree) Path() []string {
 	return t.path
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
index f8498d85ceb..1854a8b2068 100644
--- a/vendor/github.com/hashicorp/terraform/config/raw_config.go
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -3,8 +3,14 @@ package config
 import (
 	"bytes"
 	"encoding/gob"
+	"errors"
+	"strconv"
 	"sync"
 
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+
+	hcl2 "github.com/hashicorp/hcl2/hcl"
 	"github.com/hashicorp/hil"
 	"github.com/hashicorp/hil/ast"
 	"github.com/mitchellh/copystructure"
@@ -27,8 +33,24 @@ const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
 // RawConfig supports a query-like interface to request
 // information from deep within the structure.
 type RawConfig struct {
-	Key string
-	Raw map[string]interface{}
+	Key string
+
+	// Only _one_ of Raw and Body may be populated at a time.
+	//
+	// In the normal case, Raw is populated and Body is nil.
+	//
+	// When the experimental HCL2 parsing mode is enabled, "Body"
+	// is populated and RawConfig serves only to transport the hcl2.Body
+	// through the rest of Terraform core so we can ultimately decode it
+	// once its schema is known.
+	//
+	// Once we transition to HCL2 as the primary representation, RawConfig
+	// should be removed altogether and the hcl2.Body should be passed
+	// around directly.
+
+	Raw  map[string]interface{}
+	Body hcl2.Body
+
 	Interpolations []ast.Node
 	Variables      map[string]InterpolatedVariable
@@ -48,6 +70,26 @@ func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
 	return result, nil
 }
 
+// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule
+// to transport a hcl2.Body. In this mode, the publicly-readable struct
+// fields are not populated since all operations should instead be diverted
+// to the HCL2 body.
+//
+// For a RawConfig object constructed with this function, the only valid use
+// is to later retrieve the Body value and call its own methods. Callers
+// may choose to set and then later handle the Key field, in a manner
+// consistent with how it is handled by the Value method, but the Value
+// method itself must not be used.
+//
+// This is an experimental codepath to be used only by the HCL2 config loader.
+// Non-experimental parsing should _always_ use NewRawConfig to produce a
+// fully-functional RawConfig object.
+func NewRawConfigHCL2(body hcl2.Body) *RawConfig {
+	return &RawConfig{
+		Body: body,
+	}
+}
+
 // RawMap returns a copy of the RawConfig.Raw map.
 func (r *RawConfig) RawMap() map[string]interface{} {
 	r.lock.Lock()
@@ -69,6 +111,10 @@ func (r *RawConfig) Copy() *RawConfig {
 	r.lock.Lock()
 	defer r.lock.Unlock()
 
+	if r.Body != nil {
+		return NewRawConfigHCL2(r.Body)
+	}
+
 	newRaw := make(map[string]interface{})
 	for k, v := range r.Raw {
 		newRaw[k] = v
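RawConfig is now effectively a two-mode capsule: exactly one of Raw or Body is populated, and Copy preserves whichever mode is active. A simplified sketch of that pattern, with hcl2.Body stood in by an empty interface and all names invented:

package main

import "fmt"

// body stands in for hcl2.Body, which is only decoded later once a schema
// is known.
type body interface{}

type rawConfig struct {
	raw map[string]interface{} // normal HCL1 path
	b   body                   // experimental HCL2 path; mutually exclusive with raw
}

func newRawConfigHCL2(b body) *rawConfig { return &rawConfig{b: b} }

// copyConfig preserves whichever mode is in use, as RawConfig.Copy does.
func (r *rawConfig) copyConfig() *rawConfig {
	if r.b != nil {
		return newRawConfigHCL2(r.b)
	}
	raw := make(map[string]interface{}, len(r.raw))
	for k, v := range r.raw {
		raw[k] = v
	}
	return &rawConfig{raw: raw}
}

func main() {
	c := newRawConfigHCL2(struct{}{})
	fmt.Println(c.copyConfig().b != nil) // true: the copy is still in HCL2 mode
}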
@@ -223,6 +269,13 @@ func (r *RawConfig) init() error {
 }
 
 func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+	if r.Body != nil {
+		// For RawConfigs created for the HCL2 experiment, callers must
+		// use the HCL2 Body API directly rather than interpolating via
+		// the RawConfig.
+		return errors.New("this feature is not yet supported under the HCL2 experiment")
+	}
+
 	config, err := copystructure.Copy(r.Raw)
 	if err != nil {
 		return err
@@ -268,6 +321,74 @@ func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
 	return result
 }
 
+// couldBeInteger is a helper that determines if the represented value could
+// result in an integer.
+//
+// This function only works for RawConfigs that have "Key" set, meaning that
+// a single result can be produced. Calling this function will overwrite
+// the Config and Value results to be a test value.
+//
+// This function is conservative. If there is some doubt about whether the
+// result could be an integer -- for example, if it depends on a variable
+// whose type we don't know yet -- it will still return true.
+func (r *RawConfig) couldBeInteger() bool {
+	if r.Key == "" {
+		// un-keyed RawConfigs can never produce numbers
+		return false
+	}
+	if r.Body == nil {
+		// Normal path: using the interpolator in this package
+		// Interpolate with a fixed number to verify that it's a number.
+		r.interpolate(func(root ast.Node) (interface{}, error) {
+			// Execute the node but transform the AST so that it returns
+			// a fixed value of "5" for all interpolations.
+			result, err := hil.Eval(
+				hil.FixedValueTransform(
+					root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+				nil)
+			if err != nil {
+				return "", err
+			}
+
+			return result.Value, nil
+		})
+		_, err := strconv.ParseInt(r.Value().(string), 0, 0)
+		return err == nil
+	} else {
+		// HCL2 experiment path: using the HCL2 API via shims
+		//
+		// This path catches fewer situations because we have to assume all
+		// variables are entirely unknown in HCL2, rather than the assumption
+		// above that all variables can be numbers because names like "var.foo"
+		// are considered a single variable rather than an attribute access.
+		// This is fine in practice, because we get a definitive answer
+		// during the graph walk when we have real values to work with.
+		attrs, diags := r.Body.JustAttributes()
+		if diags.HasErrors() {
+			// This body is not just a single attribute with a value, so
+			// this can't be a number.
+			return false
+		}
+		attr, hasAttr := attrs[r.Key]
+		if !hasAttr {
+			return false
+		}
+		result, diags := hcl2EvalWithUnknownVars(attr.Expr)
+		if diags.HasErrors() {
+			// We'll conservatively assume that this error is a result of
+			// us not being ready to fully-populate the scope, and catch
+			// any further problems during the main graph walk.
+			return true
+		}
+
+		// If the result is convertible to number then we'll allow it.
+		// We do this because an unknown string is optimistically convertible
+		// to number (might be "5") but a _known_ string "hello" is not.
+		_, err := convert.Convert(result, cty.Number)
+		return err == nil
+	}
+}
+
 // UnknownKeys returns the keys of the configuration that are unknown
 // because they had interpolated variables that must be computed.
 func (r *RawConfig) UnknownKeys() []string {
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
index f7bfadd9e84..831fc77867f 100644
--- a/vendor/github.com/hashicorp/terraform/config/testing.go
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -6,6 +6,8 @@ import (
 
 // TestRawConfig is used to create a RawConfig for testing.
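The HCL1 branch of couldBeInteger reduces to: substitute a known numeric literal for every interpolation, then try to parse the result as an integer. A rough standalone illustration, faking the hil transform with a plain string replacement on an invented variable name:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// couldBeInt fakes the hil.FixedValueTransform step by substituting "5" for
// a ${...} placeholder, then attempting the same strconv.ParseInt check.
func couldBeInt(value string) bool {
	substituted := strings.Replace(value, "${var.count}", "5", -1)
	_, err := strconv.ParseInt(substituted, 0, 0)
	return err == nil
}

func main() {
	fmt.Println(couldBeInt("${var.count}"))  // true: could evaluate to a number
	fmt.Println(couldBeInt("x${var.count}")) // false: can never be an integer
}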
func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig { + t.Helper() + cfg, err := NewRawConfig(c) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go index f8776bc5115..b7eb10c3354 100644 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ b/vendor/github.com/hashicorp/terraform/dag/dag.go @@ -106,7 +106,7 @@ func (g *AcyclicGraph) TransitiveReduction() { uTargets := g.DownEdges(u) vs := AsVertexList(g.DownEdges(u)) - g.DepthFirstWalk(vs, func(v Vertex, d int) error { + g.depthFirstWalk(vs, false, func(v Vertex, d int) error { shared := uTargets.Intersection(g.DownEdges(v)) for _, vPrime := range AsVertexList(shared) { g.RemoveEdge(BasicEdge(u, vPrime)) @@ -187,9 +187,18 @@ type vertexAtDepth struct { } // depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. This is not exported now but it would make sense -// to export this publicly at some point. +// the vertices in start. func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { + return g.depthFirstWalk(start, true, f) +} + +// This internal method provides the option of not sorting the vertices during +// the walk, which we use for the Transitive reduction. +// Some configurations can lead to fully-connected subgraphs, which makes our +// transitive reduction algorithm O(n^3). This is still passable for the size +// of our graphs, but the additional n^2 sort operations would make this +// uncomputable in a reasonable amount of time. +func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") seen := make(map[Vertex]struct{}) @@ -219,7 +228,11 @@ func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { // Visit targets of this in a consistent order. targets := AsVertexList(g.DownEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) + + if sorted { + sort.Sort(byVertexName(targets)) + } + for _, t := range targets { frontier = append(frontier, &vertexAtDepth{ Vertex: t, diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go index 16d5dd6dde8..c567d27194f 100644 --- a/vendor/github.com/hashicorp/terraform/dag/marshal.go +++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go @@ -273,6 +273,9 @@ func (e *encoder) Encode(i interface{}) { } func (e *encoder) Add(v Vertex) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, AddVertex: newMarshalVertex(v), @@ -281,6 +284,9 @@ func (e *encoder) Add(v Vertex) { // Remove records the removal of Vertex v. 
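The sorted/unsorted split in depthFirstWalk can be seen in a toy walker: sorting the frontier gives a deterministic visit order, while skipping it avoids the per-node sort that would dominate inside transitive reduction. A sketch over a plain adjacency map:

package main

import (
	"fmt"
	"sort"
)

// dfs walks an adjacency map from start, optionally visiting neighbors in
// sorted order; transitive-reduction-style callers would pass sorted=false.
func dfs(adj map[string][]string, start string, sorted bool, visit func(string)) {
	seen := map[string]bool{}
	stack := []string{start}
	for len(stack) > 0 {
		n := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if seen[n] {
			continue
		}
		seen[n] = true
		visit(n)

		targets := append([]string(nil), adj[n]...)
		if sorted {
			sort.Strings(targets)
		}
		for _, t := range targets {
			stack = append(stack, t)
		}
	}
}

func main() {
	adj := map[string][]string{"a": {"c", "b"}, "b": {"d"}, "c": {"d"}}
	dfs(adj, "a", true, func(n string) { fmt.Print(n, " ") })
	fmt.Println()
}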
func (e *encoder) Remove(v Vertex) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, RemoveVertex: newMarshalVertex(v), @@ -288,6 +294,9 @@ func (e *encoder) Remove(v Vertex) { } func (e *encoder) Connect(edge Edge) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, AddEdge: newMarshalEdge(edge), @@ -295,6 +304,9 @@ func (e *encoder) Connect(edge Edge) { } func (e *encoder) RemoveEdge(edge Edge) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, RemoveEdge: newMarshalEdge(edge), diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go index 23c87adc1ab..f03b10030be 100644 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ b/vendor/github.com/hashicorp/terraform/dag/walk.go @@ -166,7 +166,7 @@ func (w *Walker) Update(g *AcyclicGraph) { w.wait.Add(1) // Add to our own set so we know about it already - log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) w.vertices.Add(raw) // Initialize the vertex info @@ -198,7 +198,7 @@ func (w *Walker) Update(g *AcyclicGraph) { // Delete it out of the map delete(w.vertexMap, v) - log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) w.vertices.Delete(raw) } @@ -229,7 +229,7 @@ func (w *Walker) Update(g *AcyclicGraph) { changedDeps.Add(waiter) log.Printf( - "[DEBUG] dag/walk: added edge: %q waiting on %q", + "[TRACE] dag/walk: added edge: %q waiting on %q", VertexName(waiter), VertexName(dep)) w.edges.Add(raw) } @@ -253,7 +253,7 @@ func (w *Walker) Update(g *AcyclicGraph) { changedDeps.Add(waiter) log.Printf( - "[DEBUG] dag/walk: removed edge: %q waiting on %q", + "[TRACE] dag/walk: removed edge: %q waiting on %q", VertexName(waiter), VertexName(dep)) w.edges.Delete(raw) } @@ -296,7 +296,7 @@ func (w *Walker) Update(g *AcyclicGraph) { info.depsCancelCh = cancelCh log.Printf( - "[DEBUG] dag/walk: dependencies changed for %q, sending new deps", + "[TRACE] dag/walk: dependencies changed for %q, sending new deps", VertexName(v)) // Start the waiter @@ -383,10 +383,10 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { // Run our callback or note that our upstream failed var err error if depsSuccess { - log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) err = w.Callback(v) } else { - log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) err = errWalkUpstream } @@ -423,7 +423,7 @@ func (w *Walker) waitDeps( return case <-time.After(time.Second * 5): - log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q", + log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", VertexName(v), VertexName(dep)) } } diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go index 64d8263e601..6ccc5231834 100644 --- a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go +++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go @@ -1,6 +1,8 @@ package hashcode import ( + "bytes" + "fmt" "hash/crc32" ) @@ -20,3 +22,14 @@ func String(s string) int { // v == MinInt return 0 } + +// Strings hashes a list of strings to a unique hashcode. 
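Strings, defined next, just joins its inputs with a separator and feeds the result to String, which folds a CRC-32 into a non-negative int. A standalone sketch of both halves (separator and folding mirror the vendored code):

package main

import (
	"bytes"
	"fmt"
	"hash/crc32"
)

// stringHash mirrors hashcode.String: a CRC-32 folded into a non-negative int.
func stringHash(s string) int {
	v := int(crc32.ChecksumIEEE([]byte(s)))
	if v >= 0 {
		return v
	}
	if -v >= 0 {
		return -v
	}
	// v == MinInt
	return 0
}

// stringsHash mirrors hashcode.Strings: join with "-" and hash the result.
func stringsHash(items []string) string {
	var buf bytes.Buffer
	for _, s := range items {
		buf.WriteString(s + "-")
	}
	return fmt.Sprintf("%d", stringHash(buf.String()))
}

func main() {
	fmt.Println(stringsHash([]string{"subnet-1", "subnet-2"}))
}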
+func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go index 433cd77d3b3..6bd92f77784 100644 --- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go +++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go @@ -18,7 +18,7 @@ const ( EnvLogFile = "TF_LOG_PATH" // Set to a file ) -var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} // LogOutput determines where we should send logs (if anywhere) and the log level. func LogOutput() (logOutput io.Writer, err error) { @@ -40,7 +40,7 @@ func LogOutput() (logOutput io.Writer, err error) { // This was the default since the beginning logOutput = &logutils.LevelFilter{ - Levels: validLevels, + Levels: ValidLevels, MinLevel: logutils.LogLevel(logLevel), Writer: logOutput, } @@ -77,7 +77,7 @@ func LogLevel() string { logLevel = strings.ToUpper(envLevel) } else { log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", - envLevel, validLevels) + envLevel, ValidLevels) } return logLevel @@ -90,7 +90,7 @@ func IsDebugOrHigher() bool { } func isValidLogLevel(level string) bool { - for _, l := range validLevels { + for _, l := range ValidLevels { if strings.ToUpper(level) == string(l) { return true } diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go index d7de1a030a4..aa0aa51fba6 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go @@ -11,11 +11,13 @@ import ( "reflect" "regexp" "strings" + "syscall" "testing" "github.com/davecgh/go-spew/spew" "github.com/hashicorp/go-getter" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/logutils" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/helper/logging" "github.com/hashicorp/terraform/terraform" @@ -186,6 +188,10 @@ type TestCheckFunc func(*terraform.State) error // ImportStateCheckFunc is the check function for ImportState tests type ImportStateCheckFunc func([]*terraform.InstanceState) error +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + // TestCase is a single acceptance test case used to test the apply/destroy // lifecycle of a resource in a specific configuration. // @@ -308,6 +314,10 @@ type TestStep struct { // are tested alongside real resources PreventPostDestroyRefresh bool + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + //--------------------------------------------------------------- // ImportState testing //--------------------------------------------------------------- @@ -329,6 +339,12 @@ type TestStep struct { // the unset ImportStateId field. ImportStateIdPrefix string + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. 
It is sent the state, which can be
+	// checked to derive the attributes necessary and generate the string in the
+	// desired format.
+	ImportStateIdFunc ImportStateIdFunc
+
 	// ImportStateCheck checks the results of ImportState. It should be
 	// used to verify that the resulting value of ImportState has the
 	// proper resources, IDs, and attributes.
@@ -345,6 +361,37 @@ type TestStep struct {
 	ImportStateVerifyIgnore []string
 }
 
+// Set to a file mask in sprintf format where %s is test name
+const EnvLogPathMask = "TF_LOG_PATH_MASK"
+
+func LogOutput(t TestT) (logOutput io.Writer, err error) {
+	logOutput = ioutil.Discard
+
+	logLevel := logging.LogLevel()
+	if logLevel == "" {
+		return
+	}
+
+	logOutput = os.Stderr
+	if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" {
+		logPath := fmt.Sprintf(logPathMask, t.Name())
+		var err error
+		logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// This was the default since the beginning
+	logOutput = &logutils.LevelFilter{
+		Levels:   logging.ValidLevels,
+		MinLevel: logutils.LogLevel(logLevel),
+		Writer:   logOutput,
+	}
+
+	return
+}
+
 // Test performs an acceptance test on a resource.
 //
 // Tests are not run unless an environmental variable "TF_ACC" is
@@ -366,7 +413,7 @@ func Test(t TestT, c TestCase) {
 		return
 	}
 
-	logWriter, err := logging.LogOutput()
+	logWriter, err := LogOutput(t)
 	if err != nil {
 		t.Error(fmt.Errorf("error setting up logging: %s", err))
 	}
@@ -398,7 +445,18 @@ func Test(t TestT, c TestCase) {
 	errored := false
 	for i, step := range c.Steps {
 		var err error
-		log.Printf("[WARN] Test: Executing step %d", i)
+		log.Printf("[DEBUG] Test: Executing step %d", i)
+
+		if step.SkipFunc != nil {
+			skip, err := step.SkipFunc()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if skip {
+				log.Printf("[WARN] Skipping step %d", i)
+				continue
+			}
+		}
 
 		if step.Config == "" && !step.ImportState {
 			err = fmt.Errorf(
@@ -936,6 +994,7 @@ type TestT interface {
 	Error(args ...interface{})
 	Fatal(args ...interface{})
 	Skip(args ...interface{})
+	Name() string
 }
 
 // This is set to true by unit tests to alter some behavior
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
index 28ad105267d..94fef3cfbe8 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -16,15 +16,24 @@ func testStepImportState(
 	state *terraform.State,
 	step TestStep) (*terraform.State, error) {
 	// Determine the ID to import
-	importId := step.ImportStateId
-	if importId == "" {
+	var importId string
+	switch {
+	case step.ImportStateIdFunc != nil:
+		var err error
+		importId, err = step.ImportStateIdFunc(state)
+		if err != nil {
+			return state, err
+		}
+	case step.ImportStateId != "":
+		importId = step.ImportStateId
+	default:
 		resource, err := testResource(step, state)
 		if err != nil {
 			return state, err
 		}
 		importId = resource.Primary.ID
 	}
+
 	importPrefix := step.ImportStateIdPrefix
 	if importPrefix != "" {
 		importId = fmt.Sprintf("%s%s", importPrefix, importId)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
new file mode 100644
index 00000000000..bf952f66312
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
@@ -0,0 +1,155 @@
+package 
schema + +import ( + "fmt" + + "github.com/hashicorp/terraform/config/configschema" + "github.com/zclconf/go-cty/cty" +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by Terraform core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// Terraform core. +// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. This is appropriate only for primitives or collections whose +// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections +// whose elem is a whole resource. +func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { + return &configschema.Attribute{ + Type: s.coreConfigSchemaType(), + Optional: s.Optional, + Required: s.Required, + Computed: s.Computed, + Sensitive: s.Sensitive, + } +} + +// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of +// a schema. This is appropriate only for collections whose Elem is an instance +// of Resource, and will panic otherwise. +func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { + ret := &configschema.NestedBlock{} + if nested := s.Elem.(*Resource).CoreConfigSchema(); nested != nil { + ret.Block = *nested + } + switch s.Type { + case TypeList: + ret.Nesting = configschema.NestingList + case TypeSet: + ret.Nesting = configschema.NestingSet + case TypeMap: + ret.Nesting = configschema.NestingMap + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type)) + } + + ret.MinItems = s.MinItems + ret.MaxItems = s.MaxItems + + if s.Required && s.MinItems == 0 { + // configschema doesn't have a "required" representation for nested + // blocks, but we can fake it by requiring at least one item. + ret.MinItems = 1 + } + + return ret +} + +// coreConfigSchemaType determines the core config schema type that corresponds +// to a particular schema's type. 
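The mapping itself is mechanical: string and bool map to the corresponding cty primitives, int and float both collapse to cty.Number, and collections wrap their element type. A cut-down sketch using go-cty directly, with an invented simplified stand-in for ValueType:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// kind is a simplified stand-in for helper/schema's ValueType.
type kind int

const (
	kindString kind = iota
	kindBool
	kindInt
	kindFloat
	kindListOfString
)

// ctyType shows the shape of the mapping; the real method also recurses
// into Elem for lists, sets and maps.
func ctyType(k kind) cty.Type {
	switch k {
	case kindString:
		return cty.String
	case kindBool:
		return cty.Bool
	case kindInt, kindFloat:
		// int vs. float is validated later by helper/schema itself
		return cty.Number
	case kindListOfString:
		return cty.List(cty.String)
	default:
		panic("unknown kind")
	}
}

func main() {
	fmt.Println(ctyType(kindInt).FriendlyName())          // number
	fmt.Println(ctyType(kindListOfString).FriendlyName()) // list of string
}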
+func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. + return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case *Resource: + // In practice we don't actually use this for normal schema + // construction because we construct a NestedBlock in that + // case instead. See schemaMap.CoreConfigSchema. + elemType = set.CoreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. + elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema +// on the resource's schema. +func (r *Resource) CoreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go index 16bbae29613..644b93e6a05 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go @@ -29,29 +29,59 @@ type DiffFieldReader struct { Diff *terraform.InstanceDiff Source FieldReader Schema map[string]*Schema + + // cache for memoizing ReadField calls. + cache map[string]cachedFieldReadResult +} + +type cachedFieldReadResult struct { + val FieldReadResult + err error } func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { + if r.cache == nil { + r.cache = make(map[string]cachedFieldReadResult) + } + + // Create the cache key by joining around a value that isn't a valid part + // of an address. This assumes that the Source and Schema are not changed + // for the life of this DiffFieldReader. 
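The memoization added here keys the cache on the joined address, relying on "|" never appearing in an address part. The same pattern in isolation, with an invented "expensive read" standing in for the schema walk:

package main

import (
	"fmt"
	"strings"
)

type result struct {
	value string
	err   error
}

type reader struct {
	cache map[string]result
	reads int
}

// read memoizes on the joined address, as DiffFieldReader.ReadField does.
func (r *reader) read(address []string) (string, error) {
	if r.cache == nil {
		r.cache = map[string]result{}
	}
	key := strings.Join(address, "|")
	if cached, ok := r.cache[key]; ok {
		return cached.value, cached.err
	}
	r.reads++ // stand-in for the expensive field lookup
	res := result{value: "value-of-" + key}
	r.cache[key] = res
	return res.value, res.err
}

func main() {
	r := &reader{}
	r.read([]string{"tags", "Name"})
	r.read([]string{"tags", "Name"})
	fmt.Println(r.reads) // 1: the second read was served from the cache
}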
+ cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + schemaList := addrToSchema(address, r.Schema) if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} return FieldReadResult{}, nil } + var res FieldReadResult + var err error + schema := schemaList[len(schemaList)-1] switch schema.Type { case TypeBool, TypeInt, TypeFloat, TypeString: - return r.readPrimitive(address, schema) + res, err = r.readPrimitive(address, schema) case TypeList: - return readListField(r, address, schema) + res, err = readListField(r, address, schema) case TypeMap: - return r.readMap(address, schema) + res, err = r.readMap(address, schema) case TypeSet: - return r.readSet(address, schema) + res, err = r.readSet(address, schema) case typeObject: - return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) default: panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err } func (r *DiffFieldReader) readMap( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go index fb28b4151d4..30b6b9e3d77 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -185,6 +186,29 @@ func (p *Provider) TestReset() error { return nil } +// GetSchema implementation of terraform.ResourceProvider interface +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + // Input implementation of terraform.ResourceProvider interface. func (p *Provider) Input( input terraform.UIInput, @@ -305,6 +329,10 @@ func (p *Provider) Resources() []terraform.ResourceType { result = append(result, terraform.ResourceType{ Name: k, Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, }) } @@ -410,6 +438,10 @@ func (p *Provider) DataSources() []terraform.DataSource { for _, k := range keys { result = append(result, terraform.DataSource{ Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. 
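GetSchema only returns schemas for names the caller actually requested and that the provider actually has. The filtering shape, reduced to plain maps with strings standing in for the schema blocks:

package main

import "fmt"

// filterSchemas mirrors the loops in Provider.GetSchema: copy over only the
// requested names that exist in the provider's maps.
func filterSchemas(available map[string]string, requested []string) map[string]string {
	out := map[string]string{}
	for _, name := range requested {
		if s, ok := available[name]; ok {
			out[name] = s
		}
	}
	return out
}

func main() {
	available := map[string]string{"aws_instance": "schema-a", "aws_vpc": "schema-b"}
	fmt.Println(filterSchemas(available, []string{"aws_instance", "aws_unknown"}))
}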
+			SchemaAvailable: true,
 		})
 	}
 
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
index ddba1096d88..97b357706e9 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -393,19 +393,43 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
 				return err
 			}
 		}
+
+		for k, f := range tsm {
+			if isReservedResourceFieldName(k, f) {
+				return fmt.Errorf("%s is a reserved field name", k)
+			}
+		}
 	}
 
-	// Resource-specific checks
-	for k, _ := range tsm {
-		if isReservedResourceFieldName(k) {
-			return fmt.Errorf("%s is a reserved field name for a resource", k)
+	// Data source
+	if r.isTopLevel() && !writable {
+		tsm = schemaMap(r.Schema)
+		for k := range tsm {
+			if isReservedDataSourceFieldName(k) {
+				return fmt.Errorf("%s is a reserved field name", k)
+			}
 		}
 	}
 
 	return schemaMap(r.Schema).InternalValidate(tsm)
 }
 
-func isReservedResourceFieldName(name string) bool {
+func isReservedDataSourceFieldName(name string) bool {
+	for _, reservedName := range config.ReservedDataSourceFields {
+		if name == reservedName {
+			return true
+		}
+	}
+	return false
+}
+
+func isReservedResourceFieldName(name string, s *Schema) bool {
+	// Allow phasing out "id"
+	// See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415
+	if name == "id" && (s.Deprecated != "" || s.Removed != "") {
+		return false
+	}
+
 	for _, reservedName := range config.ReservedResourceFields {
 		if name == reservedName {
 			return true
@@ -450,7 +474,7 @@ func (r *Resource) TestResourceData() *ResourceData {
 // Returns true if the resource is "top level" i.e. not a sub-resource.
 func (r *Resource) isTopLevel() bool {
 	// TODO: This is a heuristic; replace with a definitive attribute?
-	return r.Create != nil
+	return (r.Create != nil || r.Read != nil)
 }
 
 // Determines if a given InstanceState needs to be migrated by checking the
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
index de05f40eed9..bb194ee6505 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -153,6 +153,31 @@ func (s *Set) Equal(raw interface{}) bool {
 	return reflect.DeepEqual(s.m, other.m)
 }
 
+// HashEqual simply compares the keys of the top-level map to the keys in the
+// other set's top-level map to see if they are equal. This obviously assumes
+// you have a properly working hash function - use HashResource if in doubt.
+func (s *Set) HashEqual(raw interface{}) bool {
+	other, ok := raw.(*Set)
+	if !ok {
+		return false
+	}
+
+	ks1 := make([]string, 0)
+	ks2 := make([]string, 0)
+
+	for k := range s.m {
+		ks1 = append(ks1, k)
+	}
+	for k := range other.m {
+		ks2 = append(ks2, k)
+	}
+
+	sort.Strings(ks1)
+	sort.Strings(ks2)
+
+	return reflect.DeepEqual(ks1, ks2)
+}
+
 func (s *Set) GoString() string {
 	return fmt.Sprintf("*Set(%#v)", s.m)
 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
index 9765bdbc6dc..353c9366afa 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -10,6 +10,8 @@ import (
 
 // TestResourceDataRaw creates a ResourceData from a raw configuration map.
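HashEqual's comparison therefore reduces to "both sets contain the same sorted hash keys". The same check in isolation, over plain maps:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sameKeys compares only the (sorted) keys of two maps, as Set.HashEqual
// does with the sets' hash keys.
func sameKeys(a, b map[string]interface{}) bool {
	ka := make([]string, 0, len(a))
	kb := make([]string, 0, len(b))
	for k := range a {
		ka = append(ka, k)
	}
	for k := range b {
		kb = append(kb, k)
	}
	sort.Strings(ka)
	sort.Strings(kb)
	return reflect.DeepEqual(ka, kb)
}

func main() {
	a := map[string]interface{}{"1": "x", "2": "y"}
	b := map[string]interface{}{"2": "z", "1": "w"}
	fmt.Println(sameKeys(a, b)) // true: values differ but the keys match
}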
func TestResourceDataRaw( t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + c, err := config.NewRawConfig(raw) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go index 221433b7cb1..2ac1028cc10 100644 --- a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go +++ b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go @@ -3,6 +3,8 @@ package validation import ( "fmt" "net" + "reflect" + "regexp" "strings" "github.com/hashicorp/terraform/helper/schema" @@ -104,6 +106,24 @@ func StringLenBetween(min, max int) schema.SchemaValidateFunc { } } +// NoZeroValues is a SchemaValidateFunc which tests if the provided value is +// not a zero value. It's useful in situations where you want to catch +// explicit zero values on things like required fields during validation. +func NoZeroValues(i interface{}, k string) (s []string, es []error) { + if reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() { + switch reflect.TypeOf(i).Kind() { + case reflect.String: + es = append(es, fmt.Errorf("%s must not be empty", k)) + case reflect.Int, reflect.Float64: + es = append(es, fmt.Errorf("%s must not be zero", k)) + default: + // this validator should only ever be applied to TypeString, TypeInt and TypeFloat + panic(fmt.Errorf("can't use NoZeroValues with %T attribute %s", i, k)) + } + } + return +} + // CIDRNetwork returns a SchemaValidateFunc which tests if the provided value // is of type string, is in valid CIDR network notation, and has significant bits between min and max (inclusive) func CIDRNetwork(min, max int) schema.SchemaValidateFunc { @@ -138,9 +158,34 @@ func CIDRNetwork(min, max int) schema.SchemaValidateFunc { } } +// ValidateJsonString is a SchemaValidateFunc which tests to make sure the +// supplied string is valid JSON. func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) { if _, err := structure.NormalizeJsonString(v); err != nil { errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) } return } + +// ValidateListUniqueStrings is a ValidateFunc that ensures a list has no +// duplicate items in it. It's useful for when a list is needed over a set +// because order matters, yet the items still need to be unique. +func ValidateListUniqueStrings(v interface{}, k string) (ws []string, errors []error) { + for n1, v1 := range v.([]interface{}) { + for n2, v2 := range v.([]interface{}) { + if v1.(string) == v2.(string) && n1 != n2 { + errors = append(errors, fmt.Errorf("%q: duplicate entry - %s", k, v1.(string))) + } + } + } + return +} + +// ValidateRegexp returns a SchemaValidateFunc which tests to make sure the +// supplied string is a valid regular expression. 
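ValidateListUniqueStrings performs a simple O(n^2) pairwise scan, so each duplicate value is reported once per position it occupies. The check in isolation:

package main

import "fmt"

// duplicates reports each value that appears more than once, mirroring the
// pairwise scan in ValidateListUniqueStrings.
func duplicates(values []string) []string {
	var dups []string
	for i, v1 := range values {
		for j, v2 := range values {
			if v1 == v2 && i != j {
				dups = append(dups, v1)
			}
		}
	}
	return dups
}

func main() {
	fmt.Println(duplicates([]string{"80", "443", "80"})) // [80 80]: reported from both positions
}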
+func ValidateRegexp(v interface{}, k string) (ws []string, errors []error) { + if _, err := regexp.Compile(v.(string)); err != nil { + errors = append(errors, fmt.Errorf("%q: %s", k, err)) + } + return +} diff --git a/vendor/github.com/hashicorp/terraform/main.go b/vendor/github.com/hashicorp/terraform/main.go index ca4ec7c6201..77f441d20ab 100644 --- a/vendor/github.com/hashicorp/terraform/main.go +++ b/vendor/github.com/hashicorp/terraform/main.go @@ -6,11 +6,15 @@ import ( "io/ioutil" "log" "os" + "path/filepath" "runtime" "strings" "sync" + "github.com/mitchellh/colorstring" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/command/format" "github.com/hashicorp/terraform/helper/logging" "github.com/hashicorp/terraform/terraform" "github.com/mattn/go-colorable" @@ -95,7 +99,19 @@ func realMain() int { return wrappedMain() } +func init() { + Ui = &cli.PrefixedUi{ + AskPrefix: OutputPrefix, + OutputPrefix: OutputPrefix, + InfoPrefix: OutputPrefix, + ErrorPrefix: ErrorPrefix, + Ui: &cli.BasicUi{Writer: os.Stdout}, + } +} + func wrappedMain() int { + var err error + // We always need to close the DebugInfo before we exit. defer terraform.CloseDebugInfo() @@ -106,34 +122,40 @@ func wrappedMain() int { log.Printf("[INFO] Go runtime version: %s", runtime.Version()) log.Printf("[INFO] CLI args: %#v", os.Args) - // Load the configuration - config := BuiltinConfig - - // Load the configuration file if we have one, that can be used to - // define extra providers and provisioners. - clicfgFile, err := cliConfigFile() - if err != nil { - Ui.Error(fmt.Sprintf("Error loading CLI configuration: \n\n%s", err)) - return 1 - } - - if clicfgFile != "" { - usrcfg, err := LoadConfig(clicfgFile) - if err != nil { - Ui.Error(fmt.Sprintf("Error loading CLI configuration: \n\n%s", err)) - return 1 + config, diags := LoadConfig() + if len(diags) > 0 { + // Since we haven't instantiated a command.Meta yet, we need to do + // some things manually here and use some "safe" defaults for things + // that command.Meta could otherwise figure out in smarter ways. + Ui.Error("There are some problems with the CLI configuration:") + for _, diag := range diags { + earlyColor := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, // Disable color to be conservative until we know better + Reset: true, + } + Ui.Error(format.Diagnostic(diag, earlyColor, 78)) } + if diags.HasErrors() { + Ui.Error("As a result of the above problems, Terraform may not behave as intended.\n\n") + // We continue to run anyway, since Terraform has reasonable defaults. + } + } + log.Printf("[DEBUG] CLI config is %#v", config) - config = *config.Merge(usrcfg) + // In tests, Commands may already be set to provide mock commands + if Commands == nil { + initCommands(config) } // Run checkpoint - go runCheckpoint(&config) + go runCheckpoint(config) // Make sure we clean up any managed plugins at the end of this defer plugin.CleanupClients() // Get the command line args. + binName := filepath.Base(os.Args[0]) args := os.Args[1:] // Build the CLI so far, we do this so we can query the subcommand. @@ -175,10 +197,15 @@ func wrappedMain() int { // Rebuild the CLI with any modified args. 
log.Printf("[INFO] CLI command args: %#v", args) cliRunner = &cli.CLI{ + Name: binName, Args: args, Commands: Commands, HelpFunc: helpFunc, HelpWriter: os.Stdout, + + Autocomplete: true, + AutocompleteInstall: "install-autocomplete", + AutocompleteUninstall: "uninstall-autocomplete", } // Pass in the overriding plugin paths from config diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go index 3a5cb7af052..7e2f4fecb27 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/client.go +++ b/vendor/github.com/hashicorp/terraform/plugin/client.go @@ -1,8 +1,10 @@ package plugin import ( + "os" "os/exec" + hclog "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/plugin/discovery" ) @@ -10,11 +12,18 @@ import ( // ClientConfig returns a configuration object that can be used to instantiate // a client for the plugin described by the given metadata. func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { + logger := hclog.New(&hclog.LoggerOptions{ + Name: "plugin", + Level: hclog.Trace, + Output: os.Stderr, + }) + return &plugin.ClientConfig{ Cmd: exec.Command(m.Path), HandshakeConfig: Handshake, Managed: true, Plugins: PluginMap, + Logger: logger, } } diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go index f5bc4c1c70e..5c643f96f07 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go @@ -3,6 +3,7 @@ package discovery import ( "io/ioutil" "log" + "os" "path/filepath" "strings" ) @@ -59,7 +60,6 @@ func findPluginPaths(kind string, dirs []string) []string { fullName := item.Name() if !strings.HasPrefix(fullName, prefix) { - log.Printf("[DEBUG] skipping %q, not a %s", fullName, kind) continue } @@ -71,6 +71,12 @@ func findPluginPaths(kind string, dirs []string) []string { continue } + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + log.Printf("[DEBUG] found %s %q", kind, fullName) ret = append(ret, filepath.Clean(absPath)) continue @@ -83,6 +89,12 @@ func findPluginPaths(kind string, dirs []string) []string { continue } + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + log.Printf("[WARNING] found legacy %s %q", kind, fullName) ret = append(ret, filepath.Clean(absPath)) @@ -92,6 +104,17 @@ func findPluginPaths(kind string, dirs []string) []string { return ret } +// Returns true if and only if the given path refers to a file or a symlink +// to a file. +func pathIsFile(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return !info.IsDir() +} + // ResolvePluginPaths takes a list of paths to plugin executables (as returned // by e.g. FindPluginPaths) and produces a PluginMetaSet describing the // referenced plugins. 
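findPluginPaths now ignores anything that is not a regular file; because os.Stat follows symlinks, a symlink to a file still passes. The same check reused to filter candidate paths:

package main

import (
	"fmt"
	"os"
)

// isFile mirrors pathIsFile above: true only for paths that stat successfully
// and are not directories.
func isFile(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return !info.IsDir()
}

func main() {
	candidates := []string{os.Args[0], os.TempDir()} // a file and a directory
	for _, p := range candidates {
		fmt.Printf("%s: usable=%v\n", p, isFile(p))
	}
}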
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go index 241b5cb3bf7..152aec04709 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go @@ -3,10 +3,12 @@ package discovery import ( "errors" "fmt" + "io" "io/ioutil" "log" "net/http" "os" + "path/filepath" "runtime" "strconv" "strings" @@ -16,6 +18,7 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" getter "github.com/hashicorp/go-getter" multierror "github.com/hashicorp/go-multierror" + "github.com/mitchellh/cli" ) // Releases are located by parsing the html listing from releases.hashicorp.com. @@ -47,6 +50,10 @@ type Installer interface { type ProviderInstaller struct { Dir string + // Cache is used to access and update a local cache of plugins if non-nil. + // Can be nil to disable caching. + Cache PluginCache + PluginProtocolVersion uint // OS and Arch specify the OS and architecture that should be used when @@ -58,6 +65,8 @@ type ProviderInstaller struct { // Skip checksum and signature verification SkipVerify bool + + Ui cli.Ui // Ui for output } // Get is part of an implementation of type Installer, and attempts to download @@ -98,6 +107,12 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e // sort them newest to oldest Versions(versions).Sort() + // Ensure that our installation directory exists + err = os.MkdirAll(i.Dir, os.ModePerm) + if err != nil { + return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err) + } + // take the first matching plugin we find for _, v := range versions { url := i.providerURL(provider, v.String()) @@ -116,8 +131,9 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) if checkPlugin(url, i.PluginProtocolVersion) { - log.Printf("[DEBUG] getting provider %q version %q at %s", provider, v, url) - err := getter.Get(i.Dir, url) + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String())) + log.Printf("[DEBUG] getting provider %q version %q", provider, v) + err := i.install(provider, v, url) if err != nil { return PluginMeta{}, err } @@ -164,6 +180,98 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e return PluginMeta{}, ErrorNoVersionCompatible } +func (i *ProviderInstaller) install(provider string, version Version, url string) error { + if i.Cache != nil { + log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider, version) + cached := i.Cache.CachedPluginPath("provider", provider, version) + if cached == "" { + log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider, version, url) + err := getter.Get(i.Cache.InstallDir(), url) + if err != nil { + return err + } + // should now be in cache + cached = i.Cache.CachedPluginPath("provider", provider, version) + if cached == "" { + // should never happen if the getter is behaving properly + // and the plugins are packaged properly. + return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) + } + } + + // Link or copy the cached binary into our install dir so the + // normal resolution machinery can find it. 
+ filename := filepath.Base(cached) + targetPath := filepath.Join(i.Dir, filename) + + log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) + + // Delete if we can. If there's nothing there already then no harm done. + // This is important because we can't create a link if there's + // already a file of the same name present. + // (any other error here we'll catch below when we try to write here) + os.Remove(targetPath) + + // We don't attempt linking on Windows because links are not + // comprehensively supported by all tools/apps in Windows and + // so we choose to be conservative to avoid creating any + // weird issues for Windows users. + linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned + if runtime.GOOS != "windows" { + // Try hard linking first. Hard links are preferable because this + // creates a self-contained directory that doesn't depend on the + // cache after install. + linkErr = os.Link(cached, targetPath) + + // If that failed, try a symlink. This _does_ depend on the cache + // after install, so the user must manage the cache more carefully + // in this case, but avoids creating redundant copies of the + // plugins on disk. + if linkErr != nil { + linkErr = os.Symlink(cached, targetPath) + } + } + + // If we still have an error then we'll try a copy as a fallback. + // In this case either the OS is Windows or the target filesystem + // can't support symlinks. + if linkErr != nil { + srcFile, err := os.Open(cached) + if err != nil { + return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) + } + defer srcFile.Close() + + destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create %s: %s", targetPath, err) + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + destFile.Close() + return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) + } + + err = destFile.Close() + if err != nil { + return fmt.Errorf("error creating %s: %s", targetPath, err) + } + } + + // One way or another, by the time we get here we should have either + // a link or a copy of the cached plugin within i.Dir, as expected. + } else { + log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider, version, url) + err := getter.Get(i.Dir, url) + if err != nil { + return err + } + } + + return nil +} + func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { purge := make(PluginMetaSet) @@ -422,3 +530,7 @@ func getFile(url string) ([]byte, error) { } return data, nil } + +func GetReleaseHost() string { + return releaseHost +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go new file mode 100644 index 00000000000..1a100426482 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go @@ -0,0 +1,48 @@ +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. 
+ CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. +func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. + plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go index 473f7860131..d6a433c4e56 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go @@ -41,6 +41,24 @@ func (p *ResourceProvider) Stop() error { return err } +func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + var result ResourceProviderGetSchemaResponse + args := &ResourceProviderGetSchemaArgs{ + Req: req, + } + + err := p.Client.Call("Plugin.GetSchema", args, &result) + if err != nil { + return nil, err + } + + if result.Error != nil { + err = result.Error + } + + return result.Schema, err +} + func (p *ResourceProvider) Input( input terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { @@ -312,6 +330,15 @@ type ResourceProviderStopResponse struct { Error *plugin.BasicError } +type ResourceProviderGetSchemaArgs struct { + Req *terraform.ProviderSchemaRequest +} + +type ResourceProviderGetSchemaResponse struct { + Schema *terraform.ProviderSchema + Error *plugin.BasicError +} + type ResourceProviderConfigureResponse struct { Error *plugin.BasicError } @@ -418,6 +445,18 @@ func (s *ResourceProviderServer) Stop( return nil } +func (s *ResourceProviderServer) GetSchema( + args *ResourceProviderGetSchemaArgs, + result *ResourceProviderGetSchemaResponse, +) error { + schema, err := s.Provider.GetSchema(args.Req) + result.Schema = schema + if err != nil { + result.Error = plugin.NewBasicError(err) + } + return nil +} + func (s *ResourceProviderServer) Input( args *ResourceProviderInputArgs, reply *ResourceProviderInputResponse) error { diff --git a/vendor/github.com/hashicorp/terraform/plugins.go b/vendor/github.com/hashicorp/terraform/plugins.go index bf239780623..cf2d5425357 100644 --- a/vendor/github.com/hashicorp/terraform/plugins.go +++ b/vendor/github.com/hashicorp/terraform/plugins.go @@ -1,8 +1,10 @@ package main import ( + "fmt" "log" "path/filepath" + "runtime" ) // globalPluginDirs returns directories that should be searched for @@ -18,7 +20,9 @@ func globalPluginDirs() []string { if err != nil { log.Printf("[ERROR] Error finding global config 
directory: %s", err) } else { + machineDir := fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH) ret = append(ret, filepath.Join(dir, "plugins")) + ret = append(ret, filepath.Join(dir, "plugins", machineDir)) } return ret diff --git a/vendor/github.com/hashicorp/terraform/state/remote/artifactory.go b/vendor/github.com/hashicorp/terraform/state/remote/artifactory.go new file mode 100644 index 00000000000..727e9faf037 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/artifactory.go @@ -0,0 +1,117 @@ +package remote + +import ( + "crypto/md5" + "fmt" + "os" + "strings" + + artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" +) + +const ARTIF_TFSTATE_NAME = "terraform.tfstate" + +func artifactoryFactory(conf map[string]string) (Client, error) { + userName, ok := conf["username"] + if !ok { + userName = os.Getenv("ARTIFACTORY_USERNAME") + if userName == "" { + return nil, fmt.Errorf( + "missing 'username' configuration or ARTIFACTORY_USERNAME environment variable") + } + } + password, ok := conf["password"] + if !ok { + password = os.Getenv("ARTIFACTORY_PASSWORD") + if password == "" { + return nil, fmt.Errorf( + "missing 'password' configuration or ARTIFACTORY_PASSWORD environment variable") + } + } + url, ok := conf["url"] + if !ok { + url = os.Getenv("ARTIFACTORY_URL") + if url == "" { + return nil, fmt.Errorf( + "missing 'url' configuration or ARTIFACTORY_URL environment variable") + } + } + repo, ok := conf["repo"] + if !ok { + return nil, fmt.Errorf( + "missing 'repo' configuration") + } + subpath, ok := conf["subpath"] + if !ok { + return nil, fmt.Errorf( + "missing 'subpath' configuration") + } + + clientConf := &artifactory.ClientConfig{ + BaseURL: url, + Username: userName, + Password: password, + } + nativeClient := artifactory.NewClient(clientConf) + + return &ArtifactoryClient{ + nativeClient: &nativeClient, + userName: userName, + password: password, + url: url, + repo: repo, + subpath: subpath, + }, nil + +} + +type ArtifactoryClient struct { + nativeClient *artifactory.ArtifactoryClient + userName string + password string + url string + repo string + subpath string +} + +func (c *ArtifactoryClient) Get() (*Payload, error) { + p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) + output, err := c.nativeClient.Get(p, make(map[string]string)) + if err != nil { + if strings.Contains(err.Error(), "404") { + return nil, nil + } + return nil, err + } + + // TODO: migrate to using X-Checksum-Md5 header from artifactory + // needs to be exposed by go-artifactory first + + hash := md5.Sum(output) + payload := &Payload{ + Data: output, + MD5: hash[:md5.Size], + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *ArtifactoryClient) Put(data []byte) error { + p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) + if _, err := c.nativeClient.Put(p, string(data), make(map[string]string)); err == nil { + return nil + } else { + return fmt.Errorf("Failed to upload state: %v", err) + } +} + +func (c *ArtifactoryClient) Delete() error { + p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) + err := c.nativeClient.Delete(p) + return err +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/etcd.go b/vendor/github.com/hashicorp/terraform/state/remote/etcd.go new file mode 100644 index 00000000000..7993603ff2e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/etcd.go @@ -0,0 +1,78 @@ +package remote 
+ +import ( + "crypto/md5" + "fmt" + "strings" + + etcdapi "github.com/coreos/etcd/client" + "golang.org/x/net/context" +) + +func etcdFactory(conf map[string]string) (Client, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("missing 'path' configuration") + } + + endpoints, ok := conf["endpoints"] + if !ok || endpoints == "" { + return nil, fmt.Errorf("missing 'endpoints' configuration") + } + + config := etcdapi.Config{ + Endpoints: strings.Split(endpoints, " "), + } + if username, ok := conf["username"]; ok && username != "" { + config.Username = username + } + if password, ok := conf["password"]; ok && password != "" { + config.Password = password + } + + client, err := etcdapi.New(config) + if err != nil { + return nil, err + } + + return &EtcdClient{ + Client: client, + Path: path, + }, nil +} + +// EtcdClient is a remote client that stores data in etcd. +type EtcdClient struct { + Client etcdapi.Client + Path string +} + +func (c *EtcdClient) Get() (*Payload, error) { + resp, err := etcdapi.NewKeysAPI(c.Client).Get(context.Background(), c.Path, &etcdapi.GetOptions{Quorum: true}) + if err != nil { + if err, ok := err.(etcdapi.Error); ok && err.Code == etcdapi.ErrorCodeKeyNotFound { + return nil, nil + } + return nil, err + } + if resp.Node.Dir { + return nil, fmt.Errorf("path is a directory") + } + + data := []byte(resp.Node.Value) + md5 := md5.Sum(data) + return &Payload{ + Data: data, + MD5: md5[:], + }, nil +} + +func (c *EtcdClient) Put(data []byte) error { + _, err := etcdapi.NewKeysAPI(c.Client).Set(context.Background(), c.Path, string(data), nil) + return err +} + +func (c *EtcdClient) Delete() error { + _, err := etcdapi.NewKeysAPI(c.Client).Delete(context.Background(), c.Path, nil) + return err +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/file.go b/vendor/github.com/hashicorp/terraform/state/remote/file.go new file mode 100644 index 00000000000..f3cbdb45eee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/file.go @@ -0,0 +1,64 @@ +package remote + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "os" +) + +func fileFactory(conf map[string]string) (Client, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("missing 'path' configuration") + } + + return &FileClient{ + Path: path, + }, nil +} + +// FileClient is a remote client that stores data locally on disk. +// This is only used for development reasons to test remote state... locally. 
+type FileClient struct { + Path string +} + +func (c *FileClient) Get() (*Payload, error) { + var buf bytes.Buffer + f, err := os.Open(c.Path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + defer f.Close() + + if _, err := io.Copy(&buf, f); err != nil { + return nil, err + } + + md5 := md5.Sum(buf.Bytes()) + return &Payload{ + Data: buf.Bytes(), + MD5: md5[:], + }, nil +} + +func (c *FileClient) Put(data []byte) error { + f, err := os.Create(c.Path) + if err != nil { + return err + } + defer f.Close() + + _, err = f.Write(data) + return err +} + +func (c *FileClient) Delete() error { + return os.Remove(c.Path) +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/gcs.go b/vendor/github.com/hashicorp/terraform/state/remote/gcs.go new file mode 100644 index 00000000000..bfd5118cdcf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/gcs.go @@ -0,0 +1,176 @@ +package remote + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "os" + "runtime" + "strings" + + "github.com/hashicorp/terraform/helper/pathorcontents" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/api/googleapi" + "google.golang.org/api/storage/v1" + + version "github.com/hashicorp/terraform/version" +) + +// accountFile represents the structure of the credentials JSON +type accountFile struct { + PrivateKeyId string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientId string `json:"client_id"` +} + +func parseJSON(result interface{}, contents string) error { + r := strings.NewReader(contents) + dec := json.NewDecoder(r) + + return dec.Decode(result) +} + +type GCSClient struct { + bucket string + path string + clientStorage *storage.Service + context context.Context +} + +func gcsFactory(conf map[string]string) (Client, error) { + var account accountFile + var client *http.Client + clientScopes := []string{ + "https://www.googleapis.com/auth/devstorage.full_control", + } + + bucketName, ok := conf["bucket"] + if !ok { + return nil, fmt.Errorf("missing 'bucket' configuration") + } + + pathName, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("missing 'path' configuration") + } + + credentials, ok := conf["credentials"] + if !ok { + credentials = os.Getenv("GOOGLE_CREDENTIALS") + } + + if credentials != "" { + contents, _, err := pathorcontents.Read(credentials) + if err != nil { + return nil, fmt.Errorf("Error loading credentials: %s", err) + } + + // Assume account_file is a JSON string + if err := parseJSON(&account, contents); err != nil { + return nil, fmt.Errorf("Error parsing credentials '%s': %s", contents, err) + } + + // Get the token for use in our requests + log.Printf("[INFO] Requesting Google token...") + log.Printf("[INFO] -- Email: %s", account.ClientEmail) + log.Printf("[INFO] -- Scopes: %s", clientScopes) + log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) + + conf := jwt.Config{ + Email: account.ClientEmail, + PrivateKey: []byte(account.PrivateKey), + Scopes: clientScopes, + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + client = conf.Client(oauth2.NoContext) + + } else { + log.Printf("[INFO] Authenticating using DefaultClient") + err := error(nil) + client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) 
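// The credentials branch above is the standard golang.org/x/oauth2/jwt flow.
// A compile-only sketch condensing it: parse the service-account JSON (field
// names match the accountFile struct above) and let jwt.Config mint an
// authenticated *http.Client.
package sketch

import (
	"encoding/json"
	"net/http"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jwt"
)

func clientFromServiceAccount(jsonKey []byte, scopes ...string) (*http.Client, error) {
	var account struct {
		PrivateKey  string `json:"private_key"`
		ClientEmail string `json:"client_email"`
	}
	if err := json.Unmarshal(jsonKey, &account); err != nil {
		return nil, err
	}

	conf := jwt.Config{
		Email:      account.ClientEmail,
		PrivateKey: []byte(account.PrivateKey),
		Scopes:     scopes,
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}
	// The returned client attaches and refreshes OAuth2 tokens automatically.
	return conf.Client(oauth2.NoContext), nil
}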
+ if err != nil { + return nil, err + } + } + versionString := version.Version + userAgent := fmt.Sprintf( + "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) + + log.Printf("[INFO] Instantiating Google Storage Client...") + clientStorage, err := storage.New(client) + if err != nil { + return nil, err + } + clientStorage.UserAgent = userAgent + + return &GCSClient{ + clientStorage: clientStorage, + bucket: bucketName, + path: pathName, + }, nil + +} + +func (c *GCSClient) Get() (*Payload, error) { + // Read the object from bucket. + log.Printf("[INFO] Reading %s/%s", c.bucket, c.path) + + resp, err := c.clientStorage.Objects.Get(c.bucket, c.path).Download() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[INFO] %s/%s not found", c.bucket, c.path) + + return nil, nil + } + + return nil, fmt.Errorf("[WARN] Error retrieving object %s/%s: %s", c.bucket, c.path, err) + } + defer resp.Body.Close() + + var buf []byte + w := bytes.NewBuffer(buf) + n, err := io.Copy(w, resp.Body) + if err != nil { + log.Fatalf("[WARN] error buffering %q: %v", c.path, err) + } + log.Printf("[INFO] Downloaded %d bytes", n) + + payload := &Payload{ + Data: w.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *GCSClient) Put(data []byte) error { + log.Printf("[INFO] Writing %s/%s", c.bucket, c.path) + + r := bytes.NewReader(data) + _, err := c.clientStorage.Objects.Insert(c.bucket, &storage.Object{Name: c.path}).Media(r).Do() + if err != nil { + return err + } + + return nil +} + +func (c *GCSClient) Delete() error { + log.Printf("[INFO] Deleting %s/%s", c.bucket, c.path) + + err := c.clientStorage.Objects.Delete(c.bucket, c.path).Do() + return err + +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/http.go b/vendor/github.com/hashicorp/terraform/state/remote/http.go new file mode 100644 index 00000000000..b30c15590cb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/http.go @@ -0,0 +1,338 @@ +package remote + +import ( + "bytes" + "crypto/md5" + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + + "github.com/hashicorp/terraform/state" +) + +func httpFactory(conf map[string]string) (Client, error) { + address, ok := conf["address"] + if !ok { + return nil, fmt.Errorf("missing 'address' configuration") + } + + updateURL, err := url.Parse(address) + if err != nil { + return nil, fmt.Errorf("failed to parse address URL: %s", err) + } + if updateURL.Scheme != "http" && updateURL.Scheme != "https" { + return nil, fmt.Errorf("address must be HTTP or HTTPS") + } + updateMethod, ok := conf["update_method"] + if !ok { + updateMethod = "POST" + } + + var lockURL *url.URL + if lockAddress, ok := conf["lock_address"]; ok { + var err error + lockURL, err = url.Parse(lockAddress) + if err != nil { + return nil, fmt.Errorf("failed to parse lockAddress URL: %s", err) + } + if lockURL.Scheme != "http" && lockURL.Scheme != "https" { + return nil, fmt.Errorf("lockAddress must be HTTP or HTTPS") + } + } else { + lockURL = nil + } + lockMethod, ok := conf["lock_method"] + if !ok { + lockMethod = "LOCK" + } + + var unlockURL *url.URL + if unlockAddress, ok := conf["unlock_address"]; ok { + var err error + unlockURL, err = url.Parse(unlockAddress) + if err != nil { + return nil, fmt.Errorf("failed to parse unlockAddress URL: %s", err) + } + if unlockURL.Scheme != "http" && 
unlockURL.Scheme != "https" { + return nil, fmt.Errorf("unlockAddress must be HTTP or HTTPS") + } + } else { + unlockURL = nil + } + unlockMethod, ok := conf["unlock_method"] + if !ok { + unlockMethod = "UNLOCK" + } + + client := &http.Client{} + if skipRaw, ok := conf["skip_cert_verification"]; ok { + skip, err := strconv.ParseBool(skipRaw) + if err != nil { + return nil, fmt.Errorf("skip_cert_verification must be boolean") + } + if skip { + // Replace the client with one that ignores TLS verification + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + } + } + + ret := &HTTPClient{ + URL: updateURL, + UpdateMethod: updateMethod, + + LockURL: lockURL, + LockMethod: lockMethod, + UnlockURL: unlockURL, + UnlockMethod: unlockMethod, + + Username: conf["username"], + Password: conf["password"], + + // accessible only for testing use + Client: client, + } + + return ret, nil +} + +// HTTPClient is a remote client that stores data in Consul or HTTP REST. +type HTTPClient struct { + // Update & Retrieve + URL *url.URL + UpdateMethod string + + // Locking + LockURL *url.URL + LockMethod string + UnlockURL *url.URL + UnlockMethod string + + // HTTP + Client *http.Client + Username string + Password string + + lockID string + jsonLockInfo []byte +} + +func (c *HTTPClient) httpRequest(method string, url *url.URL, data *[]byte, what string) (*http.Response, error) { + // If we have data we need a reader + var reader io.Reader = nil + if data != nil { + reader = bytes.NewReader(*data) + } + + // Create the request + req, err := http.NewRequest(method, url.String(), reader) + if err != nil { + return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err) + } + // Setup basic auth + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + // Work with data/body + if data != nil { + req.Header.Set("Content-Type", "application/json") + req.ContentLength = int64(len(*data)) + + // Generate the MD5 + hash := md5.Sum(*data) + b64 := base64.StdEncoding.EncodeToString(hash[:]) + req.Header.Set("Content-MD5", b64) + } + + // Make the request + resp, err := c.Client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to %s: %v", what, err) + } + + return resp, nil +} + +func (c *HTTPClient) Lock(info *state.LockInfo) (string, error) { + if c.LockURL == nil { + return "", nil + } + c.lockID = "" + + jsonLockInfo := info.Marshal() + resp, err := c.httpRequest(c.LockMethod, c.LockURL, &jsonLockInfo, "lock") + if err != nil { + return "", err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + c.lockID = info.ID + c.jsonLockInfo = jsonLockInfo + return info.ID, nil + case http.StatusUnauthorized: + return "", fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return "", fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusConflict, http.StatusLocked: + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("HTTP remote state already locked, failed to read body") + } + existing := state.LockInfo{} + err = json.Unmarshal(body, &existing) + if err != nil { + return "", fmt.Errorf("HTTP remote state already locked, failed to unmarshal body") + } + return "", fmt.Errorf("HTTP remote state already locked: ID=%s", existing.ID) + default: + return "", fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *HTTPClient) Unlock(id string) error { + if 
c.UnlockURL == nil { + return nil + } + + resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, &c.jsonLockInfo, "unlock") + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *HTTPClient) Get() (*Payload, error) { + resp, err := c.httpRequest("GET", c.URL, nil, "get state") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Handle the common status codes + switch resp.StatusCode { + case http.StatusOK: + // Handled after + case http.StatusNoContent: + return nil, nil + case http.StatusNotFound: + return nil, nil + case http.StatusUnauthorized: + return nil, fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusInternalServerError: + return nil, fmt.Errorf("HTTP remote state internal server error") + default: + return nil, fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } + + // Read in the body + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, resp.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + // Create the payload + payload := &Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + // Check for the MD5 + if raw := resp.Header.Get("Content-MD5"); raw != "" { + md5, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf( + "Failed to decode Content-MD5 '%s': %s", raw, err) + } + + payload.MD5 = md5 + } else { + // Generate the MD5 + hash := md5.Sum(payload.Data) + payload.MD5 = hash[:] + } + + return payload, nil +} + +func (c *HTTPClient) Put(data []byte) error { + // Copy the target URL + base := *c.URL + + if c.lockID != "" { + query := base.Query() + query.Set("ID", c.lockID) + base.RawQuery = query.Encode() + } + + /* + // Set the force query parameter if needed + if force { + values := base.Query() + values.Set("force", "true") + base.RawQuery = values.Encode() + } + */ + + var method string = "POST" + if c.UpdateMethod != "" { + method = c.UpdateMethod + } + resp, err := c.httpRequest(method, &base, &data, "upload state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} + +func (c *HTTPClient) Delete() error { + // Make the request + resp, err := c.httpRequest("DELETE", c.URL, nil, "delete state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/manta.go b/vendor/github.com/hashicorp/terraform/state/remote/manta.go new file mode 100644 index 00000000000..37782c43340 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/manta.go @@ -0,0 +1,124 @@ +package remote + +import ( + "crypto/md5" + "encoding/pem" + "fmt" + "io/ioutil" + "log" + "os" + + joyentclient "github.com/joyent/gocommon/client" + joyenterrors "github.com/joyent/gocommon/errors" + "github.com/joyent/gomanta/manta" + joyentauth "github.com/joyent/gosign/auth" +) + +const DEFAULT_OBJECT_NAME = 
"terraform.tfstate" + +func mantaFactory(conf map[string]string) (Client, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("missing 'path' configuration") + } + + objectName, ok := conf["objectName"] + if !ok { + objectName = DEFAULT_OBJECT_NAME + } + + creds, err := getCredentialsFromEnvironment() + + if err != nil { + return nil, fmt.Errorf("Error getting Manta credentials: %s", err.Error()) + } + + client := manta.New(joyentclient.NewClient( + creds.MantaEndpoint.URL, + "", + creds, + log.New(os.Stderr, "", log.LstdFlags), + )) + + return &MantaClient{ + Client: client, + Path: path, + ObjectName: objectName, + }, nil +} + +type MantaClient struct { + Client *manta.Client + Path string + ObjectName string +} + +func (c *MantaClient) Get() (*Payload, error) { + bytes, err := c.Client.GetObject(c.Path, c.ObjectName) + if err != nil { + if joyenterrors.IsResourceNotFound(err.(joyenterrors.Error).Cause()) { + return nil, nil + } + + return nil, err + } + + md5 := md5.Sum(bytes) + + return &Payload{ + Data: bytes, + MD5: md5[:], + }, nil +} + +func (c *MantaClient) Put(data []byte) error { + return c.Client.PutObject(c.Path, c.ObjectName, data) +} + +func (c *MantaClient) Delete() error { + return c.Client.DeleteObject(c.Path, c.ObjectName) +} + +func getCredentialsFromEnvironment() (cred *joyentauth.Credentials, err error) { + + user := os.Getenv("MANTA_USER") + keyId := os.Getenv("MANTA_KEY_ID") + url := os.Getenv("MANTA_URL") + keyMaterial := os.Getenv("MANTA_KEY_MATERIAL") + + if _, err := os.Stat(keyMaterial); err == nil { + // key material is a file path; try to read it + keyBytes, err := ioutil.ReadFile(keyMaterial) + if err != nil { + return nil, fmt.Errorf("Error reading key material from %s: %s", + keyMaterial, err) + } else { + block, _ := pem.Decode(keyBytes) + if block == nil { + return nil, fmt.Errorf( + "Failed to read key material '%s': no key found", keyMaterial) + } + + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return nil, fmt.Errorf( + "Failed to read key '%s': password protected keys are\n"+ + "not currently supported. Please decrypt the key prior to use.", keyMaterial) + } + + keyMaterial = string(keyBytes) + } + } + + authentication, err := joyentauth.NewAuth(user, keyMaterial, "rsa-sha256") + if err != nil { + return nil, fmt.Errorf("Error constructing authentication for %s: %s", user, err) + } + + return &joyentauth.Credentials{ + UserAuthentication: authentication, + SdcKeyId: "", + SdcEndpoint: joyentauth.Endpoint{}, + MantaKeyId: keyId, + MantaEndpoint: joyentauth.Endpoint{URL: url}, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/remote.go b/vendor/github.com/hashicorp/terraform/state/remote/remote.go new file mode 100644 index 00000000000..132b77f703f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/remote.go @@ -0,0 +1,54 @@ +package remote + +import ( + "fmt" + + "github.com/hashicorp/terraform/state" +) + +// Client is the interface that must be implemented for a remote state +// driver. It supports dumb put/get/delete, and the higher level structs +// handle persisting the state properly here. +type Client interface { + Get() (*Payload, error) + Put([]byte) error + Delete() error +} + +// ClientLocker is an optional interface that allows a remote state +// backend to enable state lock/unlock. +type ClientLocker interface { + Client + state.Locker +} + +// Payload is the return value from the remote state storage. 
+type Payload struct { + MD5 []byte + Data []byte +} + +// Factory is the factory function to create a remote client. +type Factory func(map[string]string) (Client, error) + +// NewClient returns a new Client with the given type and configuration. +// The client is looked up in the BuiltinClients variable. +func NewClient(t string, conf map[string]string) (Client, error) { + f, ok := BuiltinClients[t] + if !ok { + return nil, fmt.Errorf("unknown remote client type: %s", t) + } + + return f(conf) +} + +// BuiltinClients is the list of built-in clients that can be used with +// NewClient. +var BuiltinClients = map[string]Factory{ + "artifactory": artifactoryFactory, + "etcd": etcdFactory, + "gcs": gcsFactory, + "http": httpFactory, + "local": fileFactory, + "manta": mantaFactory, +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/state.go b/vendor/github.com/hashicorp/terraform/state/remote/state.go new file mode 100644 index 00000000000..575e4d18757 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/state.go @@ -0,0 +1,139 @@ +package remote + +import ( + "bytes" + "log" + "sync" + + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// State implements the State interfaces in the state package to handle +// reading and writing the remote state. This State on its own does no +// local caching so every persist will go to the remote storage and local +// writes will go to memory. +type State struct { + mu sync.Mutex + + Client Client + + state, readState *terraform.State +} + +// StateReader impl. +func (s *State) State() *terraform.State { + s.mu.Lock() + defer s.mu.Unlock() + + return s.state.DeepCopy() +} + +// StateWriter impl. +func (s *State) WriteState(state *terraform.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.readState != nil && !state.SameLineage(s.readState) { + // This can't error here, because we need to be able to overwrite the + // state in some cases, like `state push -force` or `workspace new + // -state=` + log.Printf("[WARN] incompatible state lineage; given %s but want %s", state.Lineage, s.readState.Lineage) + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = state.DeepCopy() + + // Force our new state to have the same serial as our read state. We'll + // update this if PersistState is called later. (We don't require nor trust + // the caller to properly maintain serial for transient state objects since + // the rest of Terraform treats state as an openly mutable object.) + // + // If we have no read state then we assume we're either writing a new + // state for the first time or we're migrating a state from elsewhere, + // and in both cases we wish to retain the lineage and serial from + // the given state. + if s.readState != nil { + s.state.Serial = s.readState.Serial + } + + return nil +} + +// StateRefresher impl. +func (s *State) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + + payload, err := s.Client.Get() + if err != nil { + return err + } + + // no remote state is OK + if payload == nil { + return nil + } + + state, err := terraform.ReadState(bytes.NewReader(payload.Data)) + if err != nil { + return err + } + + s.state = state + s.readState = s.state.DeepCopy() // our states must be separate instances so we can track changes + return nil +} + +// StatePersister impl. 
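// WriteState above pins the serial to the last-read serial; PersistState
// (below) then increments it only when the marshaled bytes differ. The
// decision reduces to the following sketch, with hypothetical names and
// plain types standing in for the real state structs.
package sketch

import "bytes"

// nextSerial returns the serial to persist, given the last-read serial
// and the marshaled forms of the read and new snapshots.
func nextSerial(readSerial int64, readBytes, newBytes []byte) int64 {
	if bytes.Equal(readBytes, newBytes) {
		return readSerial // nothing changed, so the version stays put
	}
	return readSerial + 1 // changed content gets a fresh serial
}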
+func (s *State) PersistState() error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.state.MarshalEqual(s.readState) { + // Our new state does not marshal as byte-for-byte identical to + // the old, so we need to increment the serial. + // Note that in WriteState we force the serial to match that of + // s.readState, if we have a readState. + s.state.Serial++ + } + + var buf bytes.Buffer + if err := terraform.WriteState(s.state, &buf); err != nil { + return err + } + + err := s.Client.Put(buf.Bytes()) + if err != nil { + return err + } + + // After we've successfully persisted, what we just wrote is our new + // reference state until someone calls RefreshState again. + s.readState = s.state.DeepCopy() + return nil +} + +// Lock calls the Client's Lock method if it's implemented. +func (s *State) Lock(info *state.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if c, ok := s.Client.(ClientLocker); ok { + return c.Lock(info) + } + return "", nil +} + +// Unlock calls the Client's Unlock method if it's implemented. +func (s *State) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if c, ok := s.Client.(ClientLocker); ok { + return c.Unlock(id) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/testing.go b/vendor/github.com/hashicorp/terraform/state/remote/testing.go new file mode 100644 index 00000000000..b379b509dfe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/testing.go @@ -0,0 +1,97 @@ +package remote + +import ( + "bytes" + "testing" + + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/terraform" +) + +// TestClient is a generic function to test any client. +func TestClient(t *testing.T, c Client) { + var buf bytes.Buffer + s := state.TestStateInitial() + if err := terraform.WriteState(s, &buf); err != nil { + t.Fatalf("err: %s", err) + } + data := buf.Bytes() + + if err := c.Put(data); err != nil { + t.Fatalf("put: %s", err) + } + + p, err := c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if !bytes.Equal(p.Data, data) { + t.Fatalf("bad: %#v", p) + } + + if err := c.Delete(); err != nil { + t.Fatalf("delete: %s", err) + } + + p, err = c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if p != nil { + t.Fatalf("bad: %#v", p) + } +} + +// Test the lock implementation for a remote.Client. +// This test requires 2 client instances, in order to have multiple remote +// clients since some implementations may tie the client to the lock, or may +// have reentrant locks.
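// A sketch of the minimum TestRemoteLocks (below) demands: while one lock is
// held, a second Lock call must fail. memLocker and LockInfo here are
// hypothetical, self-contained stand-ins for a real ClientLocker and
// state.LockInfo.
package sketch

import (
	"errors"
	"sync"
)

type LockInfo struct{ ID string } // stand-in for state.LockInfo

type memLocker struct {
	mu   sync.Mutex
	held string // ID of the active lock; empty when unlocked
}

func (l *memLocker) Lock(info *LockInfo) (string, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.held != "" {
		return "", errors.New("state already locked: ID=" + l.held)
	}
	l.held = info.ID
	return info.ID, nil
}

func (l *memLocker) Unlock(id string) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if id != l.held {
		return errors.New("unlock with incorrect lock ID")
	}
	l.held = ""
	return nil
}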
+func TestRemoteLocks(t *testing.T, a, b Client) { + lockerA, ok := a.(state.Locker) + if !ok { + t.Fatal("client A not a state.Locker") + } + + lockerB, ok := b.(state.Locker) + if !ok { + t.Fatal("client B not a state.Locker") + } + + infoA := state.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := state.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Fatalf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // TODO: Should we enforce that Unlock requires the correct ID? +} diff --git a/vendor/github.com/hashicorp/terraform/state/state.go b/vendor/github.com/hashicorp/terraform/state/state.go index 293fc0daad4..d9708d1b460 100644 --- a/vendor/github.com/hashicorp/terraform/state/state.go +++ b/vendor/github.com/hashicorp/terraform/state/state.go @@ -15,6 +15,7 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/version" ) var rngSource *rand.Rand @@ -158,7 +159,7 @@ func NewLockInfo() *LockInfo { info := &LockInfo{ ID: id, Who: fmt.Sprintf("%s@%s", userName, host), - Version: terraform.Version, + Version: version.Version, Created: time.Now().UTC(), } return info diff --git a/vendor/github.com/hashicorp/terraform/state/testing.go b/vendor/github.com/hashicorp/terraform/state/testing.go index 48261599115..3b92b33c907 100644 --- a/vendor/github.com/hashicorp/terraform/state/testing.go +++ b/vendor/github.com/hashicorp/terraform/state/testing.go @@ -11,6 +11,8 @@ import ( // that the given implementation is pre-loaded with the TestStateInitial // state. func TestState(t *testing.T, s State) { + t.Helper() + if err := s.RefreshState(); err != nil { t.Fatalf("err: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go new file mode 100644 index 00000000000..4f0d1689f6e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// CachingCredentialsSource creates a new credentials source that wraps another +// and caches its results in memory, on a per-hostname basis. +// +// No means is provided for expiration of cached credentials, so a caching +// credentials source should have a limited lifetime (one Terraform operation, +// for example) to ensure that time-limited credentials don't expire before +// their cache entries do. +func CachingCredentialsSource(source CredentialsSource) CredentialsSource { + return &cachingCredentialsSource{ + source: source, + cache: map[svchost.Hostname]HostCredentials{}, + } +} + +type cachingCredentialsSource struct { + source CredentialsSource + cache map[svchost.Hostname]HostCredentials +} + +// ForHost passes the given hostname on to the wrapped credentials source and +// caches the result to return for future requests with the same hostname. 
+// +// Both credentials and non-credentials (nil) responses are cached. +// +// No cache entry is created if the wrapped source returns an error, to allow +// the caller to retry the failing operation. +func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if cache, cached := s.cache[host]; cached { + return cache, nil + } + + result, err := s.source.ForHost(host) + if err != nil { + return result, err + } + + s.cache[host] = result + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go new file mode 100644 index 00000000000..0bc6db4f10c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go @@ -0,0 +1,60 @@ +// Package auth contains types and functions to manage authentication +// credentials for service hosts. +package auth + +import ( + "net/http" + + "github.com/hashicorp/terraform/svchost" +) + +// Credentials is a list of CredentialsSource objects that can be tried in +// turn until one returns credentials for a host, or one returns an error. +// +// A Credentials is itself a CredentialsSource, wrapping its members. +// In principle one CredentialsSource can be nested inside another, though +// there is no good reason to do so. +type Credentials []CredentialsSource + +// NoCredentials is an empty CredentialsSource that always returns nil +// when asked for credentials. +var NoCredentials CredentialsSource = Credentials{} + +// A CredentialsSource is an object that may be able to provide credentials +// for a given host. +// +// Credentials lookups are not guaranteed to be concurrency-safe. Callers +// using these facilities in concurrent code must use external concurrency +// primitives to prevent race conditions. +type CredentialsSource interface { + // ForHost returns a non-nil HostCredentials if the source has credentials + // available for the host, and a nil HostCredentials if it does not. + // + // If an error is returned, progress through a list of CredentialsSources + // is halted and the error is returned to the user. + ForHost(host svchost.Hostname) (HostCredentials, error) +} + +// HostCredentials represents a single set of credentials for a particular +// host. +type HostCredentials interface { + // PrepareRequest modifies the given request in-place to apply the + // receiving credentials. The usual behavior of this method is to + // add some sort of Authorization header to the request. + PrepareRequest(req *http.Request) +} + +// ForHost iterates over the contained CredentialsSource objects and +// tries to obtain credentials for the given host from each one in turn. +// +// If any source returns either a non-nil HostCredentials or a non-nil error +// then this result is returned. Otherwise, the result is nil, nil. +func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { + for _, source := range c { + creds, err := source.ForHost(host) + if creds != nil || err != nil { + return creds, err + } + } + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go new file mode 100644 index 00000000000..f91006aece7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go @@ -0,0 +1,18 @@ +package auth + +// HostCredentialsFromMap converts a map of key-value pairs from a credentials +// definition provided by the user (e.g. 
in a config file, or via a credentials +// helper) into a HostCredentials object if possible, or returns nil if +// no credentials could be extracted from the map. +// +// This function ignores map keys it is unfamiliar with, to allow for future +// expansion of the credentials map format for new credential types. +func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { + if m == nil { + return nil + } + if token, ok := m["token"].(string); ok { + return HostCredentialsToken(token) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go new file mode 100644 index 00000000000..d72ffe3c97b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go @@ -0,0 +1,80 @@ +package auth + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/hashicorp/terraform/svchost" +) + +type helperProgramCredentialsSource struct { + executable string + args []string +} + +// HelperProgramCredentialsSource returns a CredentialsSource that runs the +// given program with the given arguments in order to obtain credentials. +// +// The given executable path must be an absolute path; it is the caller's +// responsibility to validate and process a relative path or other input +// provided by an end-user. If the given path is not absolute, this +// function will panic. +// +// When credentials are requested, the program will be run in a child process +// with the given arguments along with two additional arguments added to the +// end of the list: the literal string "get", followed by the requested +// hostname in ASCII compatibility form (punycode form). +func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { + if !filepath.IsAbs(executable) { + panic("NewCredentialsSourceHelperProgram requires absolute path to executable") + } + + fullArgs := make([]string, len(args)+1) + fullArgs[0] = executable + copy(fullArgs[1:], args) + + return &helperProgramCredentialsSource{ + executable: executable, + args: fullArgs, + } +} + +func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "get") + args = append(args, string(host)) + + outBuf := bytes.Buffer{} + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stdout: &outBuf, + Stderr: &errBuf, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return nil, fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + var m map[string]interface{} + err = json.Unmarshal(outBuf.Bytes(), &m) + if err != nil { + return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) + } + + return HostCredentialsFromMap(m), nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/static.go b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go new file mode 100644 index 00000000000..5373fddfcc8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go @@ -0,0 +1,28 @@ +package auth + +import ( + 
"github.com/hashicorp/terraform/svchost" +) + +// StaticCredentialsSource is a credentials source that retrieves credentials +// from the provided map. It returns nil if a requested hostname is not +// present in the map. +// +// The caller should not modify the given map after passing it to this function. +func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { + return staticCredentialsSource(creds) +} + +type staticCredentialsSource map[svchost.Hostname]map[string]interface{} + +func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if s == nil { + return nil, nil + } + + if m, exists := s[host]; exists { + return HostCredentialsFromMap(m), nil + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go new file mode 100644 index 00000000000..8f771b0d9b4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go @@ -0,0 +1,20 @@ +package auth + +import ( + "net/http" +) + +// HostCredentialsToken is a HostCredentials implementation that represents a +// single "bearer token", to be sent to the server via an Authorization header +// with the auth type set to "Bearer" +type HostCredentialsToken string + +// PrepareRequest alters the given HTTP request by setting its Authorization +// header to the string "Bearer " followed by the encapsulated authentication +// token. +func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Set("Authorization", "Bearer "+string(tc)) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go new file mode 100644 index 00000000000..dd3524a2742 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go @@ -0,0 +1,208 @@ +// Package disco handles Terraform's remote service discovery protocol. +// +// This protocol allows mapping from a service hostname, as produced by the +// svchost package, to a set of services supported by that host and the +// endpoint information for each supported service. +package disco + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net/http" + "net/url" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/version" +) + +const ( + discoPath = "/.well-known/terraform.json" + maxRedirects = 3 // arbitrary-but-small number to prevent runaway redirect loops + discoTimeout = 4 * time.Second // arbitrary-but-small time limit to prevent UI "hangs" during discovery + maxDiscoDocBytes = 1 * 1024 * 1024 // 1MB - to prevent abusive services from using loads of our memory +) + +var userAgent = fmt.Sprintf("Terraform/%s (service discovery)", version.String()) +var httpTransport = cleanhttp.DefaultPooledTransport() // overridden during tests, to skip TLS verification + +// Disco is the main type in this package, which allows discovery on given +// hostnames and caches the results by hostname to avoid repeated requests +// for the same information. +type Disco struct { + hostCache map[svchost.Hostname]Host + credsSrc auth.CredentialsSource + + // Transport is a custom http.Transport to use. + // A package default is used if this is nil. 
+ Transport *http.Transport +} + +func NewDisco() *Disco { + return &Disco{} +} + +// SetCredentialsSource provides a credentials source that will be used to +// add credentials to outgoing discovery requests, where available. +// +// If this method is never called, no outgoing discovery requests will have +// credentials. +func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { + d.credsSrc = src +} + +// Discover runs the discovery protocol against the given hostname (which must +// already have been validated and prepared with svchost.ForComparison) and +// returns an object describing the services available at that host. +// +// If a given hostname supports no Terraform services at all, a non-nil but +// empty Host object is returned. When giving feedback to the end user about +// such situations, we say e.g. "the host doesn't provide a module +// registry", regardless of whether that is due to that service specifically +// being absent or due to the host not providing Terraform services at all, +// since we don't wish to expose the detail of whole-host discovery to an +// end-user. +func (d *Disco) Discover(host svchost.Hostname) Host { + if d.hostCache == nil { + d.hostCache = map[svchost.Hostname]Host{} + } + if cache, cached := d.hostCache[host]; cached { + return cache + } + + ret := d.discover(host) + d.hostCache[host] = ret + return ret +} + +// DiscoverServiceURL is a convenience wrapper for discovery on a given +// hostname and then looking up a particular service in the result. +func (d *Disco) DiscoverServiceURL(host svchost.Hostname, serviceID string) *url.URL { + return d.Discover(host).ServiceURL(serviceID) +} + +// discover implements the actual discovery process, with its result cached +// by the public-facing Discover method. +func (d *Disco) discover(host svchost.Hostname) Host { + discoURL := &url.URL{ + Scheme: "https", + Host: string(host), + Path: discoPath, + } + + t := d.Transport + if t == nil { + t = httpTransport + } + + client := &http.Client{ + Transport: t, + Timeout: discoTimeout, + + CheckRedirect: func(req *http.Request, via []*http.Request) error { + log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) + if len(via) > maxRedirects { + return errors.New("too many redirects") // (this error message will never actually be seen) + } + return nil + }, + } + + var header = http.Header{} + header.Set("User-Agent", userAgent) + + req := &http.Request{ + Method: "GET", + URL: discoURL, + Header: header, + } + + if d.credsSrc != nil { + creds, err := d.credsSrc.ForHost(host) + if err == nil { + if creds != nil { + creds.PrepareRequest(req) // alters req to include credentials + } + } else { + log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err) + } + } + + log.Printf("[DEBUG] Service discovery for %s at %s", host, discoURL) + + ret := Host{ + discoURL: discoURL, + } + + resp, err := client.Do(req) + if err != nil { + log.Printf("[WARNING] Failed to request discovery document: %s", err) + return ret // empty + } + if resp.StatusCode != 200 { + log.Printf("[WARNING] Failed to request discovery document: %s", resp.Status) + return ret // empty + } + + // If the client followed any redirects, we will have a new URL to use + // as our base for relative resolution. 
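// The body read by discover above is a flat JSON object mapping service IDs
// to endpoint URLs. An illustrative (not normative) document and the same
// decode step the code uses; the URLs are hypothetical, and relative
// endpoints are later made absolute by Host.ServiceURL.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	doc := []byte(`{
		"modules.v1": "https://registry.example.com/v1/modules/",
		"providers.v1": "/v1/providers/"
	}`)

	var services map[string]interface{}
	if err := json.Unmarshal(doc, &services); err != nil {
		panic(err)
	}
	fmt.Println(services["modules.v1"]) // endpoint advertised for the module registry
}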
+ ret.discoURL = resp.Request.URL + + contentType := resp.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + log.Printf("[WARNING] Discovery URL has malformed Content-Type %q", contentType) + return ret // empty + } + if mediaType != "application/json" { + log.Printf("[DEBUG] Discovery URL returned Content-Type %q, rather than application/json", mediaType) + return ret // empty + } + + // (this doesn't catch chunked encoding, because ContentLength is -1 in that case...) + if resp.ContentLength > maxDiscoDocBytes { + // Size limit here is not a contractual requirement and so we may + // adjust it over time if we find a different limit is warranted. + log.Printf("[WARNING] Discovery doc response is too large (got %d bytes; limit %d)", resp.ContentLength, maxDiscoDocBytes) + return ret // empty + } + + // If the response is using chunked encoding then we can't predict + // its size, but we'll at least prevent reading the entire thing into + // memory. + lr := io.LimitReader(resp.Body, maxDiscoDocBytes) + + servicesBytes, err := ioutil.ReadAll(lr) + if err != nil { + log.Printf("[WARNING] Error reading discovery document body: %s", err) + return ret // empty + } + + var services map[string]interface{} + err = json.Unmarshal(servicesBytes, &services) + if err != nil { + log.Printf("[WARNING] Failed to decode discovery document as a JSON object: %s", err) + return ret // empty + } + + ret.services = services + return ret +} + +// Forget invalidates any cached record of the given hostname. If the host +// has no cache entry then this is a no-op. +func (d *Disco) Forget(host svchost.Hostname) { + delete(d.hostCache, host) +} + +// ForgetAll is like Forget, but for all of the hostnames that have cache entries. +func (d *Disco) ForgetAll() { + d.hostCache = nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go new file mode 100644 index 00000000000..faf58220af2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go @@ -0,0 +1,51 @@ +package disco + +import ( + "net/url" +) + +type Host struct { + discoURL *url.URL + services map[string]interface{} +} + +// ServiceURL returns the URL associated with the given service identifier, +// which should be of the form "servicename.vN". +// +// A non-nil result is always an absolute URL with a scheme of either https +// or http. +// +// If the requested service is not supported by the host, this method returns +// a nil URL. +// +// If the discovery document entry for the given service is invalid (not a URL), +// it is treated as absent, also returning a nil URL. +func (h Host) ServiceURL(id string) *url.URL { + if h.services == nil { + return nil // no services supported for an empty Host + } + + urlStr, ok := h.services[id].(string) + if !ok { + return nil + } + + ret, err := url.Parse(urlStr) + if err != nil { + return nil + } + if !ret.IsAbs() { + ret = h.discoURL.ResolveReference(ret) // make absolute using our discovery doc URL + } + if ret.Scheme != "https" && ret.Scheme != "http" { + return nil + } + if ret.User != nil { + // embedded username/password information is not permitted; credentials + // are handled out of band. 
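// The relative-URL handling in ServiceURL above, reduced to one runnable
// step: a service entry like "/v1/providers/" becomes absolute against the
// discovery document's own URL via net/url ResolveReference. Values here are
// illustrative.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	discoURL, _ := url.Parse("https://example.com/.well-known/terraform.json")
	rel, _ := url.Parse("/v1/providers/")
	fmt.Println(discoURL.ResolveReference(rel)) // https://example.com/v1/providers/
}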
+ return nil + } + ret.Fragment = "" // fragment part is irrelevant, since we're not a browser + + return h.discoURL.ResolveReference(ret) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/label_iter.go b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go new file mode 100644 index 00000000000..af8ccbab208 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go @@ -0,0 +1,69 @@ +package svchost + +import ( + "strings" +) + +// A labelIter allows iterating over domain name labels. +// +// This type is copied from golang.org/x/net/idna, where it is used +// to segment hostnames into their separate labels for analysis. We use +// it for the same purpose here, in ForComparison. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/svchost.go b/vendor/github.com/hashicorp/terraform/svchost/svchost.go new file mode 100644 index 00000000000..4eded142c7f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/svchost.go @@ -0,0 +1,207 @@ +// Package svchost deals with the representations of the so-called "friendly +// hostnames" that we use to represent systems that provide Terraform-native +// remote services, such as module registry, remote operations, etc. +// +// Friendly hostnames are specified such that, as much as possible, they +// are consistent with how web browsers think of hostnames, so that users +// can bring their intuitions about how hostnames behave when they access +// a Terraform Enterprise instance's web UI (or indeed any other website) +// and have this behave in a similar way. +package svchost + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "golang.org/x/net/idna" +) + +// Hostname is a specialized name for string that indicates that the string +// has been converted to (or was already in) the storage and comparison form. +// +// Hostname values are not suitable for display in the user interface. Use +// the ForDisplay method to obtain a form suitable for display in the UI. +// +// Unlike user-supplied hostnames, strings of type Hostname (assuming they +// were constructed by a function within this package) can be compared for +// equality using the standard Go == operator. +type Hostname string + +// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that +// a domain name label is in "punycode" form.
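// What the ACE prefix looks like in practice: idna converts non-ASCII labels
// to "xn--"-prefixed ASCII, exactly the form ForComparison below refuses to
// accept directly from users. A runnable sketch using the same
// golang.org/x/net/idna profile as this package.
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	ascii, err := idna.Lookup.ToASCII("münchen.example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(ascii) // xn--mnchen-3ya.example.com
}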
+const acePrefix = "xn--" + +// displayProfile is a very liberal idna profile that we use to do +// normalization for display without imposing validation rules. +var displayProfile = idna.New( + idna.MapForLookup(), + idna.Transitional(true), +) + +// ForDisplay takes a user-specified hostname and returns a normalized form of +// it suitable for display in the UI. +// +// If the input is so invalid that no normalization can be performed then +// this will return the input, assuming that the caller still wants to +// display _something_. This function is, however, more tolerant than the +// other functions in this package and will make a best effort to prepare +// _any_ given hostname for display. +// +// For validation, use either IsValid (for explicit validation) or +// ForComparison (which implicitly validates, returning an error if invalid). +func ForDisplay(given string) string { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + portPortion, _ = normalizePortPortion(portPortion) + + ascii, err := displayProfile.ToASCII(given) + if err != nil { + return given + portPortion + } + display, err := displayProfile.ToUnicode(ascii) + if err != nil { + return given + portPortion + } + return display + portPortion +} + +// IsValid returns true if the given user-specified hostname is a valid +// service hostname. +// +// Validity is determined by complying with the RFC 5891 requirements for +// names that are valid for domain lookup (section 5), with the additional +// requirement that user-supplied forms must not _already_ contain +// Punycode segments. +func IsValid(given string) bool { + _, err := ForComparison(given) + return err == nil +} + +// ForComparison takes a user-specified hostname and returns a normalized +// form of it suitable for storage and comparison. The result is not suitable +// for display to end-users because it uses Punycode to represent non-ASCII +// characters, and this form is unreadable for non-ASCII-speaking humans. +// +// The result is typed as Hostname -- a specialized name for string -- so that +// other APIs can make it clear within the type system whether they expect a +// user-specified or display-form hostname or a value already normalized for +// comparison. +// +// The returned Hostname is not valid if the returned error is non-nil. +func ForComparison(given string) (Hostname, error) { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + + var err error + portPortion, err = normalizePortPortion(portPortion) + if err != nil { + return Hostname(""), err + } + + if given == "" { + return Hostname(""), fmt.Errorf("empty string is not a valid hostname") + } + + // First we'll apply our additional constraint that Punycode must not + // be given directly by the user. This is not an IDN specification + // requirement, but we prohibit it to force users to use human-readable + // hostname forms within Terraform configuration. 
+ labels := labelIter{orig: given} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + return Hostname(""), fmt.Errorf( + "hostname contains empty label (two consecutive periods)", + ) + } + if strings.HasPrefix(label, acePrefix) { + return Hostname(""), fmt.Errorf( + "hostname label %q specified in punycode format; service hostnames must be given in unicode", + label, + ) + } + } + + result, err := idna.Lookup.ToASCII(given) + if err != nil { + return Hostname(""), err + } + return Hostname(result + portPortion), nil +} + +// ForDisplay returns a version of the receiver that is appropriate for display +// in the UI. This includes converting any punycode labels to their +// corresponding Unicode characters. +// +// A round-trip through ForComparison and this ForDisplay method does not +// guarantee the same result as calling this package's top-level ForDisplay +// function, since a round-trip through the Hostname type implies stricter +// handling than we do when doing basic display-only processing. +func (h Hostname) ForDisplay() string { + given := string(h) + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + // We don't normalize the port portion here because we assume it's + // already been normalized on the way in. + + result, err := idna.Lookup.ToUnicode(given) + if err != nil { + // Should never happen, since type Hostname indicates that a string + // passed through our validation rules. + panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) + } + return result + portPortion +} + +func (h Hostname) String() string { + return string(h) +} + +func (h Hostname) GoString() string { + return fmt.Sprintf("svchost.Hostname(%q)", string(h)) +} + +// normalizePortPortion attempts to normalize the "port portion" of a hostname, +// which begins with the first colon in the hostname and should be followed +// by a string of decimal digits. +// +// If the port portion is valid, a normalized version of it is returned along +// with a nil error. +// +// If the port portion is invalid, the input string is returned verbatim along +// with a non-nil error. +// +// An empty string is a valid port portion representing the absence of a port. +// If non-empty, the first character must be a colon. +func normalizePortPortion(s string) (string, error) { + if s == "" { + return s, nil + } + + if s[0] != ':' { + // should never happen, since the caller is expected to guarantee the presence + // of a colon due to how it's extracted from the string.
+ return s, errors.New("port portion is missing its initial colon") + } + + numStr := s[1:] + num, err := strconv.Atoi(numStr) + if err != nil { + return s, errors.New("port portion contains non-digit characters") + } + if num == 443 { + return "", nil // ":443" is the default + } + if num > 65535 { + return s, errors.New("port number is greater than 65535") + } + return fmt.Sprintf(":%d", num), nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go index a814a85ddca..ed756c88daf 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/experiment" + "github.com/hashicorp/terraform/version" ) // InputMode defines what sort of input will be asked for when Input @@ -123,7 +123,7 @@ type Context struct { func NewContext(opts *ContextOpts) (*Context, error) { // Validate the version requirement if it is given if opts.Module != nil { - if err := checkRequiredVersion(opts.Module); err != nil { + if err := CheckRequiredVersion(opts.Module); err != nil { return nil, err } } @@ -155,7 +155,7 @@ func NewContext(opts *ContextOpts) (*Context, error) { // Explicitly reset our state version to our current version so that // any operations we do will write out that our latest version // has run. - state.TFVersion = Version + state.TFVersion = version.Version // Determine parallelism, default to 10. We do this both to limit // CPU pressure but also to have an extra guard against rate throttling @@ -465,7 +465,7 @@ func (c *Context) Input(mode InputMode) error { } // Do the walk - if _, err := c.walk(graph, nil, walkInput); err != nil { + if _, err := c.walk(graph, walkInput); err != nil { return err } } @@ -506,7 +506,7 @@ func (c *Context) Apply() (*State, error) { } // Walk the graph - walker, err := c.walk(graph, graph, operation) + walker, err := c.walk(graph, operation) if len(walker.ValidationErrors) > 0 { err = multierror.Append(err, walker.ValidationErrors...) } @@ -533,13 +533,14 @@ func (c *Context) Plan() (*Plan, error) { State: c.state, Targets: c.targets, - TerraformVersion: VersionString(), + TerraformVersion: version.String(), ProviderSHA256s: c.providerSHA256s, } var operation walkOperation if c.destroy { operation = walkPlanDestroy + p.Destroy = true } else { // Set our state to be something temporary. 
We do this so that // the plan can update a fake state so that variables work, then @@ -575,7 +576,7 @@ func (c *Context) Plan() (*Plan, error) { } // Do the walk - walker, err := c.walk(graph, graph, operation) + walker, err := c.walk(graph, operation) if err != nil { return nil, err } @@ -630,7 +631,7 @@ func (c *Context) Refresh() (*State, error) { } // Do the walk - if _, err := c.walk(graph, graph, walkRefresh); err != nil { + if _, err := c.walk(graph, walkRefresh); err != nil { return nil, err } @@ -705,7 +706,7 @@ func (c *Context) Validate() ([]string, []error) { } // Walk - walker, err := c.walk(graph, graph, walkValidate) + walker, err := c.walk(graph, walkValidate) if err != nil { return nil, multierror.Append(errs, err).Errors } @@ -792,33 +793,11 @@ func (c *Context) releaseRun() { c.runContext = nil } -func (c *Context) walk( - graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) { +func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { // Keep track of the "real" context which is the context that does // the real work: talking to real providers, modifying real state, etc. realCtx := c - // If we don't want shadowing, remove it - if !experiment.Enabled(experiment.X_shadow) { - shadow = nil - } - - // Just log this so we can see it in a debug log - if !c.shadow { - log.Printf("[WARN] terraform: shadow graph disabled") - shadow = nil - } - - // If we have a shadow graph, walk that as well - var shadowCtx *Context - var shadowCloser Shadow - if shadow != nil { - // Build the shadow context. In the process, override the real context - // with the one that is wrapped so that the shadow context can verify - // the results of the real. - realCtx, shadowCtx, shadowCloser = newShadowContext(c) - } - log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) walker := &ContextGraphWalker{ @@ -837,90 +816,6 @@ func (c *Context) walk( close(watchStop) <-watchWait - // If we have a shadow graph and we interrupted the real graph, then - // we just close the shadow and never verify it. It is non-trivial to - // recreate the exact execution state up until an interruption so this - // isn't supported with shadows at the moment. - if shadowCloser != nil && c.sh.Stopped() { - // Ignore the error result, there is nothing we could care about - shadowCloser.CloseShadow() - - // Set it to nil so we don't do anything - shadowCloser = nil - } - - // If we have a shadow graph, wait for that to complete. - if shadowCloser != nil { - // Build the graph walker for the shadow. We also wrap this in - // a panicwrap so that panics are captured. For the shadow graph, - // we just want panics to be normal errors rather than to crash - // Terraform. - shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{ - Context: shadowCtx, - Operation: operation, - }) - - // Kick off the shadow walk. This will block on any operations - // on the real walk so it is fine to start first. 
- log.Printf("[INFO] Starting shadow graph walk: %s", operation.String()) - shadowCh := make(chan error) - go func() { - shadowCh <- shadow.Walk(shadowWalker) - }() - - // Notify the shadow that we're done - if err := shadowCloser.CloseShadow(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Wait for the walk to end - log.Printf("[DEBUG] Waiting for shadow graph to complete...") - shadowWalkErr := <-shadowCh - - // Get any shadow errors - if err := shadowCloser.ShadowError(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Verify the contexts (compare) - if err := shadowContextVerify(realCtx, shadowCtx); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // At this point, if we're supposed to fail on error, then - // we PANIC. Some tests just verify that there is an error, - // so simply appending it to realErr and returning could hide - // shadow problems. - // - // This must be done BEFORE appending shadowWalkErr since the - // shadowWalkErr may include expected errors. - // - // We only do this if we don't have a real error. In the case of - // a real error, we can't guarantee what nodes were and weren't - // traversed in parallel scenarios so we can't guarantee no - // shadow errors. - if c.shadowErr != nil && contextFailOnShadowError && realErr == nil { - panic(multierror.Prefix(c.shadowErr, "shadow graph:")) - } - - // Now, if we have a walk error, we append that through - if shadowWalkErr != nil { - c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr) - } - - if c.shadowErr == nil { - log.Printf("[INFO] Shadow graph success!") - } else { - log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr) - - // If we're supposed to fail on shadow errors, then report it - if contextFailOnShadowError { - realErr = multierror.Append(realErr, multierror.Prefix( - c.shadowErr, "shadow graph:")) - } - } - } - return walker, realErr } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go index f1d57760df3..e9401431972 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -66,7 +66,7 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) { } // Walk it - if _, err := c.walk(graph, nil, walkImport); err != nil { + if _, err := c.walk(graph, walkImport); err != nil { return c.state, err } diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index fd1687e7ed6..9cbd5d88fa7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -23,6 +23,12 @@ const ( DiffUpdate DiffDestroy DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. 
+ DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion ) // multiVal matches the index key to a flatmapped set, list or map diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go index 3cb088a252a..10d9c228b4d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go @@ -49,11 +49,11 @@ func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { path = strings.Join(ctx.Path(), ".") } - log.Printf("[DEBUG] %s: eval: %T", path, n) + log.Printf("[TRACE] %s: eval: %T", path, n) output, err := n.Eval(ctx) if err != nil { if _, ok := err.(EvalEarlyExitError); ok { - log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err) + log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err) } else { log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go index 2f6a4973e45..36c98458eb9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go @@ -112,7 +112,7 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { } state.init() - { + if resourceHasUserVisibleApply(n.Info) { // Call post-apply hook err := ctx.Hook(func(h Hook) (HookAction, error) { return h.PreApply(n.Info, state, diff) @@ -136,7 +136,7 @@ type EvalApplyPost struct { func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { state := *n.State - { + if resourceHasUserVisibleApply(n.Info) { // Call post-apply hook err := ctx.Hook(func(h Hook) (HookAction, error) { return h.PostApply(n.Info, state, *n.Error) @@ -149,6 +149,22 @@ func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { return nil, *n.Error } +// resourceHasUserVisibleApply returns true if the given resource is one where +// apply actions should be exposed to the user. +// +// Certain resources do apply actions only as an implementation detail, so +// these should not be advertised to code outside of this package. +func resourceHasUserVisibleApply(info *InstanceInfo) bool { + addr := info.ResourceAddress() + + // Only managed resources have user-visible apply actions. + // In particular, this excludes data resources since we "apply" these + // only as an implementation detail of removing them from state when + // they are destroyed. (When reading, they don't get here at all because + // we present them as "Refresh" actions.) + return addr.Mode == config.ManagedResourceMode +} + // EvalApplyProvisioners is an EvalNode implementation that executes // the provisioners for a resource. 
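To see the effect of gating the hooks on resourceHasUserVisibleApply, here is a hedged sketch; fireApplyHook is a simplified stand-in for what EvalApplyPre and EvalApplyPost now do, and the resource ids are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/terraform"
)

// fireApplyHook runs the hook only for managed resources, mirroring the
// mode check in resourceHasUserVisibleApply.
func fireApplyHook(info *terraform.InstanceInfo, hook func()) {
	if info.ResourceAddress().Mode != config.ManagedResourceMode {
		return // data resource: apply is an implementation detail
	}
	hook()
}

func main() {
	managed := &terraform.InstanceInfo{Id: "aws_instance.web"}
	data := &terraform.InstanceInfo{Id: "data.aws_ami.ubuntu"}

	fireApplyHook(managed, func() { fmt.Println("PreApply fires") })
	fireApplyHook(data, func() { fmt.Println("never printed") })
}
```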
// diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go index c35f9083f89..bbc2b3667e0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/version" ) // EvalCompareDiff is an EvalNode implementation that compares two diffs @@ -60,7 +61,7 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { "\n"+ "Also include as much context as you can about your config, state, "+ "and the steps you performed to trigger this error.\n", - n.Info.Id, Version, n.Info.Id, reason, one, two) + n.Info.Id, version.Version, n.Info.Id, reason, one, two) } return nil, nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go index 6825ff59092..78c8813605c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go @@ -1,18 +1,27 @@ package terraform -import "github.com/hashicorp/terraform/config" +import ( + "log" + + "github.com/hashicorp/terraform/config" +) // EvalInterpolate is an EvalNode implementation that takes a raw // configuration and interpolates it. type EvalInterpolate struct { - Config *config.RawConfig - Resource *Resource - Output **ResourceConfig + Config *config.RawConfig + Resource *Resource + Output **ResourceConfig + ContinueOnErr bool } func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { rc, err := ctx.Interpolate(n.Config, n.Resource) if err != nil { + if n.ContinueOnErr { + log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err) + return nil, EvalEarlyExitError{} + } return nil, err } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go new file mode 100644 index 00000000000..a4b2a505984 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go @@ -0,0 +1,86 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalLocal is an EvalNode implementation that evaluates the +// expression for a local value and writes it into a transient part of +// the state. +type EvalLocal struct { + Name string + Value *config.RawConfig +} + +func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { + cfg, err := ctx.Interpolate(n.Value, nil) + if err != nil { + return nil, fmt.Errorf("local.%s: %s", n.Name, err) + } + + state, lock := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write local value to nil state") + } + + // Get a write lock so we can access the state + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. 
+ mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + mod = state.AddModule(ctx.Path()) + } + + // Get the value from the config + var valueRaw interface{} = config.UnknownVariableValue + if cfg != nil { + var ok bool + valueRaw, ok = cfg.Get("value") + if !ok { + valueRaw = "" + } + if cfg.IsComputed("value") { + valueRaw = config.UnknownVariableValue + } + } + + if mod.Locals == nil { + // initialize + mod.Locals = map[string]interface{}{} + } + mod.Locals[n.Name] = valueRaw + + return nil, nil +} + +// EvalDeleteLocal is an EvalNode implementation that deletes a Local value +// from the state. Locals aren't persisted, and we don't need to evaluate them +// during destroy. +type EvalDeleteLocal struct { + Name string +} + +func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, there's nothing to delete. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + delete(mod.Locals, n.Name) + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go index cf61781e5b9..657c70304cd 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -41,15 +41,16 @@ type EvalWriteOutput struct { Name string Sensitive bool Value *config.RawConfig + // ContinueOnErr allows interpolation to fail during Input + ContinueOnErr bool } // TODO: test func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { + // This has to run before we have a state lock, since interpolation also + // reads the state cfg, err := ctx.Interpolate(n.Value, nil) - if err != nil { - // Log error but continue anyway - log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) - } + // handle the error after we have the module from the state state, lock := ctx.State() if state == nil { @@ -59,13 +60,32 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { // Get a write lock so we can access this instance lock.Lock() defer lock.Unlock() - // Look for the module state. If we don't have one, create it.
mod := state.ModuleByPath(ctx.Path()) if mod == nil { mod = state.AddModule(ctx.Path()) } + // handle the interpolation error + if err != nil { + switch { + case featureOutputErrors: + if n.ContinueOnErr { + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err) + // if we're continuing, make sure the output is included and + // marked as unknown + mod.Outputs[n.Name] = &OutputState{ + Type: "string", + Value: config.UnknownVariableValue, + } + return nil, EvalEarlyExitError{} + } + return nil, err + default: + log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) + } + } + // Get the value from the config var valueRaw interface{} = config.UnknownVariableValue if cfg != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go index 126a0e63a9d..1f67e3d86b4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -1,6 +1,8 @@ package terraform -import "fmt" +import ( + "fmt" +) // EvalReadState is an EvalNode implementation that reads the // primary InstanceState for a specific resource out of the state. diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go new file mode 100644 index 00000000000..9bcf19d9c5a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/features.go @@ -0,0 +1,9 @@ +package terraform + +import ( + "os" +) + +// This file holds feature flags for the next release + +var featureOutputErrors = os.Getenv("TF_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go index 48ce6a33667..735ec4ecebd 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -70,7 +70,7 @@ func (g *Graph) walk(walker GraphWalker) error { // Walk the graph. var walkFn dag.WalkFunc walkFn = func(v dag.Vertex) (rerr error) { - log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v)) g.DebugVisitInfo(v, g.debugName) // If we have a panic wrap GraphWalker and a panic occurs, recover @@ -118,7 +118,7 @@ func (g *Graph) walk(walker GraphWalker) error { // Allow the walker to change our tree if needed. Eval, // then callback with the output.
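The DEBUG-to-TRACE demotions here and below reduce noise because Terraform filters these bracketed prefixes by level. A minimal sketch of that filtering, assuming hashicorp/logutils (the mechanism Terraform uses in its log setup, though that wiring is outside this diff):

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/logutils"
)

func main() {
	log.SetOutput(&logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"},
		MinLevel: logutils.LogLevel("DEBUG"), // e.g. TF_LOG=DEBUG
		Writer:   os.Stderr,
	})

	log.Printf("[TRACE] vertex 'root.aws_instance.web': walking") // now hidden at DEBUG
	log.Printf("[DEBUG] Starting graph walk: walkPlan")           // still shown
}
```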
- log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) @@ -132,7 +132,7 @@ func (g *Graph) walk(walker GraphWalker) error { // If the node is dynamically expanded, then expand it if ev, ok := v.(GraphNodeDynamicExpandable); ok { log.Printf( - "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph", + "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph", path, dag.VertexName(v)) @@ -154,7 +154,7 @@ func (g *Graph) walk(walker GraphWalker) error { // If the node has a subgraph, then walk the subgraph if sn, ok := v.(GraphNodeSubgraph); ok { log.Printf( - "[DEBUG] vertex '%s.%s': walking subgraph", + "[TRACE] vertex '%s.%s': walking subgraph", path, dag.VertexName(v)) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go index 38a90f2775f..430195c7f22 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -108,6 +108,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Add root variables &RootVariableTransformer{Module: b.Module}, + // Add the local values + &LocalTransformer{Module: b.Module}, + // Add the outputs &OutputTransformer{Module: b.Module}, @@ -117,6 +120,13 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { // Connect references so ordering is correct &ReferenceTransformer{}, + // Reverse the edges to outputs and locals, so that + // interpolations don't fail during destroy. + GraphTransformIf( + func() bool { return b.Destroy }, + &DestroyValueReferenceTransformer{}, + ), + // Add the node to fix the state count boundaries &CountBoundaryTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go index 4b29bbb4b8b..429e4248153 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -71,6 +71,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { Module: b.Module, }, + // Add the local values + &LocalTransformer{Module: b.Module}, + // Add the outputs &OutputTransformer{Module: b.Module}, @@ -107,7 +110,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer { ), // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{ + Module: b.Module, + }, // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. 
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go index 3d3e968fae9..beb5a4a4459 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go @@ -133,6 +133,9 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { &ParentProviderTransformer{}, &AttachProviderConfigTransformer{Module: b.Module}, + // Add the local values + &LocalTransformer{Module: b.Module}, + // Add the outputs &OutputTransformer{Module: b.Module}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go index 22ddce6c836..52ce1e886aa 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -90,6 +90,8 @@ func (i *Interpolater) Values( err = i.valueSimpleVar(scope, n, v, result) case *config.TerraformVariable: err = i.valueTerraformVar(scope, n, v, result) + case *config.LocalVariable: + err = i.valueLocalVar(scope, n, v, result) case *config.UserVariable: err = i.valueUserVar(scope, n, v, result) default: @@ -335,6 +337,59 @@ func (i *Interpolater) valueTerraformVar( return nil } +func (i *Interpolater) valueLocalVar( + scope *InterpolationScope, + n string, + v *config.LocalVariable, + result map[string]ast.Variable, +) error { + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + modTree := i.Module + if len(scope.Path) > 1 { + modTree = i.Module.Child(scope.Path[1:]) + } + + // Get the local value from the configuration so we can verify + // that it is declared in the configuration and so we can access + // its definition if we need to. + var cl *config.Local + for _, l := range modTree.Config().Locals { + if l.Name == v.Name { + cl = l + break + } + } + + if cl == nil { + return fmt.Errorf("%s: no local value of this name has been declared", n) + } + + // Get the relevant module + module := i.State.ModuleByPath(scope.Path) + if module == nil { + result[n] = unknownVariable() + return nil + } + + rawV, exists := module.Locals[v.Name] + if !exists { + result[n] = unknownVariable() + return nil + } + + varV, err := hil.InterfaceToVariable(rawV) + if err != nil { + // Should never happen, since interpolation should always produce + // something we can feed back into interpolation. + return fmt.Errorf("%s: %s", n, err) + } + + result[n] = varV + return nil +} + func (i *Interpolater) valueUserVar( scope *InterpolationScope, n string, diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go new file mode 100644 index 00000000000..e58f1f987d1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go @@ -0,0 +1,96 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/config" +) + +// NodeLocal represents a named local value in a particular module. +// +// Local value nodes only have one operation, common to all walk types: +// evaluate the result and place it in state.
+type NodeLocal struct { + PathValue []string + Config *config.Local +} + +func (n *NodeLocal) Name() string { + result := fmt.Sprintf("local.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeLocal) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeLocal) RemoveIfNotTargeted() bool { + return true +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableName() []string { + name := fmt.Sprintf("local.%s", n.Config.Name) + return []string{name} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []string { + var result []string + result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) + for _, v := range result { + split := strings.Split(v, "/") + for i, s := range split { + split[i] = s + ".destroy" + } + + result = append(result, strings.Join(split, "/")) + } + + return result +} + +// GraphNodeEvalable +func (n *NodeLocal) EvalTree() EvalNode { + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{ + walkInput, + walkValidate, + walkRefresh, + walkPlan, + walkApply, + }, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalLocal{ + Name: n.Config.Name, + Value: n.Config.RawConfig, + }, + }, + }, + }, + &EvalOpFilter{ + Ops: []walkOperation{ + walkPlanDestroy, + walkDestroy, + }, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalDeleteLocal{ + Name: n.Config.Name, + }, + }, + }, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go deleted file mode 100644 index 319df1e3a41..00000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go +++ /dev/null @@ -1,29 +0,0 @@ -package terraform - -import ( - "fmt" -) - -// NodeDestroyableModule represents a module destruction. -type NodeDestroyableModuleVariable struct { - PathValue []string -} - -func (n *NodeDestroyableModuleVariable) Name() string { - result := "plan-destroy" - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } - - return result -} - -// GraphNodeSubPath -func (n *NodeDestroyableModuleVariable) Path() []string { - return n.PathValue -} - -// GraphNodeEvalable -func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode { - return &EvalDiffDestroyModule{Path: n.PathValue} -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go index 13fe8fc3ae4..66ff7d5e3b7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go @@ -92,11 +92,24 @@ func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { // within the variables mapping. 
var config *ResourceConfig variables := make(map[string]interface{}) + return &EvalSequence{ Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Value, - Output: &config, + &EvalOpFilter{ + Ops: []walkOperation{walkInput}, + Node: &EvalInterpolate{ + Config: n.Value, + Output: &config, + ContinueOnErr: true, + }, + }, + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkValidate}, + Node: &EvalInterpolate{ + Config: n.Value, + Output: &config, + }, }, &EvalVariableBlock{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go index 9017a63c40f..4fd246a10c7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go @@ -69,17 +69,33 @@ func (n *NodeApplyableOutput) References() []string { // GraphNodeEvalable func (n *NodeApplyableOutput) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkInput, walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteOutput{ + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + // Don't let interpolation errors stop Input, since it happens + // before Refresh. + Ops: []walkOperation{walkInput}, + Node: &EvalWriteOutput{ + Name: n.Config.Name, + Sensitive: n.Config.Sensitive, + Value: n.Config.RawConfig, + ContinueOnErr: true, + }, + }, + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate}, + Node: &EvalWriteOutput{ Name: n.Config.Name, Sensitive: n.Config.Sensitive, Value: n.Config.RawConfig, }, }, + &EvalOpFilter{ + Ops: []walkOperation{walkDestroy, walkPlanDestroy}, + Node: &EvalDeleteOutput{ + Name: n.Config.Name, + }, + }, }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go index 51d66529b43..0c13cdafd80 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -10,6 +10,7 @@ import ( "sync" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/version" ) func init() { @@ -26,18 +27,54 @@ func init() { // necessary to make a change: the state, diff, config, backend config, etc. // This is so that it can run alone without any other data. type Plan struct { - Diff *Diff - Module *module.Tree - State *State - Vars map[string]interface{} + // Diff describes the resource actions that must be taken when this + // plan is applied. + Diff *Diff + + // Module represents the entire configuration that was present when this + // plan was created. + Module *module.Tree + + // State is the Terraform state that was current when this plan was + // created. + // + // It is not allowed to apply a plan that has a stale state, since its + // diff could be outdated. + State *State + + // Vars retains the variables that were set when creating the plan, so + // that the same variables can be applied during apply. + Vars map[string]interface{} + + // Targets, if non-empty, contains a set of resource address strings that + // identify graph nodes that were selected as targets for plan. + // + // When targets are set, any graph node that is not directly targeted or + // indirectly targeted via dependencies is excluded from the graph. Targets []string + // TerraformVersion is the version of Terraform that was used to create + // this plan. 
+ // + // It is not allowed to apply a plan created with a different version of + // Terraform, since the other fields of this structure may be interpreted + // in different ways between versions. TerraformVersion string - ProviderSHA256s map[string][]byte + + // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries + // used as plugins for each provider during plan. + // + // These must match between plan and apply to ensure that the diff is + // correctly interpreted, since different provider versions may have + // different attributes or attribute value constraints. + ProviderSHA256s map[string][]byte // Backend is the backend that this plan should use and store data with. Backend *BackendState + // Destroy indicates that this plan was created for a full destroy operation + Destroy bool + once sync.Once } @@ -67,6 +104,7 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { opts.Module = p.Module opts.Targets = p.Targets opts.ProviderSHA256s = p.ProviderSHA256s + opts.Destroy = p.Destroy if opts.State == nil { opts.State = p.State @@ -82,7 +120,7 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { log.Println("[WARNING] Plan state and ContextOpts state are not equal") } - thisVersion := VersionString() + thisVersion := version.String() if p.TerraformVersion != "" && p.TerraformVersion != thisVersion { return nil, fmt.Errorf( "plan was created with a different version of Terraform (created with %s, but running %s)", diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go index 0acf0beb2a1..a8cd8dd9f00 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -88,6 +88,46 @@ func (i *InstanceInfo) HumanId() string { i.Id) } +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. + id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. 
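Before the error branch below, it may help to see what this parsing yields for a suffixed id; a hedged sketch follows, where the id and module path are illustrative and the expected values follow directly from the code above.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	info := &terraform.InstanceInfo{
		Id:         "aws_instance.bar.0 (deposed #0)",
		ModulePath: []string{"root", "child"},
	}
	addr := info.ResourceAddress()

	// Type "aws_instance", Name "bar", Index 0, Path ["child"] ("root"
	// is trimmed), InstanceType TypeDeposed with InstanceTypeSet true.
	fmt.Println(addr.Type, addr.Name, addr.Index, addr.Path,
		addr.InstanceType == terraform.TypeDeposed)
}
```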
+ panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + func (i *InstanceInfo) uniqueId() string { prefix := i.HumanId() if v := i.uniqueExtra; v != "" { diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go index 8badca8053c..a64f5d846c8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go @@ -42,9 +42,9 @@ func (r *ResourceAddress) Copy() *ResourceAddress { Type: r.Type, Mode: r.Mode, } - for _, p := range r.Path { - n.Path = append(n.Path, p) - } + + n.Path = append(n.Path, r.Path...) + return n } @@ -362,40 +362,41 @@ func (addr *ResourceAddress) Less(other *ResourceAddress) bool { switch { - case len(addr.Path) < len(other.Path): - return true + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) case !reflect.DeepEqual(addr.Path, other.Path): // If the two paths are the same length but don't match, we'll just // cheat and compare the string forms since it's easier than - // comparing all of the path segments in turn. + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. addrStr := addr.String() otherStr := other.String() return addrStr < otherStr - case addr.Mode == config.DataResourceMode && other.Mode != config.DataResourceMode: - return true + case addr.Mode != other.Mode: + return addr.Mode == config.DataResourceMode - case addr.Type < other.Type: - return true + case addr.Type != other.Type: + return addr.Type < other.Type - case addr.Name < other.Name: - return true + case addr.Name != other.Name: + return addr.Name < other.Name - case addr.Index < other.Index: + case addr.Index != other.Index: // Since "Index" is -1 for an un-indexed address, this also conveniently // sorts unindexed addresses before indexed ones, should they both // appear for some reason. - return true + return addr.Index < other.Index - case other.InstanceTypeSet && !addr.InstanceTypeSet: - return true + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet - case addr.InstanceType < other.InstanceType: + case addr.InstanceType != other.InstanceType: // InstanceType is actually an enum, so this is just an arbitrary // sort based on the enum numeric values, and thus not particularly // meaningful. - return true + return addr.InstanceType < other.InstanceType default: return false diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go index 7d78f67ef92..93fd14fc803 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go @@ -21,6 +21,15 @@ type ResourceProvider interface { * Functions related to the provider *********************************************************************/ + // ProviderSchema returns the config schema for the main provider + // configuration, as would appear in a "provider" block in the + // configuration files. + // + // Currently not all providers support schema. 
Callers must therefore + // first call Resources and DataSources and ensure that at least one + // resource or data source has the SchemaAvailable flag set. + GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) + // Input is called to ask the provider to ask the user for input // for completing the configuration if necessary. // @@ -183,11 +192,25 @@ type ResourceProviderCloser interface { type ResourceType struct { Name string // Name of the resource, example "instance" (no provider prefix) Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool } // DataSource is a data source that a resource provider implements. type DataSource struct { Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool } // ResourceProviderResolver is an interface implemented by objects that are diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go index f5315339fb8..73cde0ccb71 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go @@ -1,6 +1,8 @@ package terraform -import "sync" +import ( + "sync" +) // MockResourceProvider implements ResourceProvider but mocks out all the // calls for testing purposes. @@ -12,6 +14,10 @@ type MockResourceProvider struct { CloseCalled bool CloseError error + GetSchemaCalled bool + GetSchemaRequest *ProviderSchemaRequest + GetSchemaReturn *ProviderSchema + GetSchemaReturnError error InputCalled bool InputInput UIInput InputConfig *ResourceConfig @@ -92,8 +98,19 @@ func (p *MockResourceProvider) Close() error { return p.CloseError } +func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + p.GetSchemaRequest = req + return p.GetSchemaReturn, p.GetSchemaReturnError +} + func (p *MockResourceProvider) Input( input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.Lock() + defer p.Unlock() p.InputCalled = true p.InputInput = input p.InputConfig = c diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go new file mode 100644 index 00000000000..ec46efcf7dd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go @@ -0,0 +1,34 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/configschema" +) + +type Schemas struct { + Providers ProviderSchemas +} + +// ProviderSchemas is a map from provider names to provider schemas. +// +// The names in this map are the direct plugin name (e.g. "aws") rather than +// any alias name (e.g. "aws.foo"), since schemas are a property of the +// provider plugin itself rather than of any particular configuration alias.
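Given the caveat above that not all providers support schema, a caller is expected to probe before asking; a hedged sketch of that contract follows (schemaFor is a hypothetical helper, and the ProviderSchemas map defined next is where such results would be collected).

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

// schemaFor is a hypothetical helper honoring the documented contract:
// only call GetSchema once a SchemaAvailable flag has been seen.
func schemaFor(p terraform.ResourceProvider, resType string) (*terraform.ProviderSchema, error) {
	for _, rt := range p.Resources() {
		if rt.Name == resType && rt.SchemaAvailable {
			return p.GetSchema(&terraform.ProviderSchemaRequest{
				ResourceTypes: []string{resType},
			})
		}
	}
	return nil, fmt.Errorf("provider has no schema support for %q", resType)
}
```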
+type ProviderSchemas map[string]*ProviderSchema + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. +type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go deleted file mode 100644 index 46325595f10..00000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow.go +++ /dev/null @@ -1,28 +0,0 @@ -package terraform - -// Shadow is the interface that any "shadow" structures must implement. -// -// A shadow structure is an interface implementation (typically) that -// shadows a real implementation and verifies that the same behavior occurs -// on both. The semantics of this behavior are up to the interface itself. -// -// A shadow NEVER modifies real values or state. It must always be safe to use. -// -// For example, a ResourceProvider shadow ensures that the same operations -// are done on the same resources with the same configurations. -// -// The typical usage of a shadow following this interface is to complete -// the real operations, then call CloseShadow which tells the shadow that -// the real side is done. Then, once the shadow is also complete, call -// ShadowError to find any errors that may have been caught. -type Shadow interface { - // CloseShadow tells the shadow that the REAL implementation is - // complete. Therefore, any calls that would block should now return - // immediately since no more changes will happen to the real side. - CloseShadow() error - - // ShadowError returns the errors that the shadow has found. - // This should be called AFTER CloseShadow and AFTER the shadow is - // known to be complete (no more calls to it). - ShadowError() error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go deleted file mode 100644 index 116cf84f97c..00000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go +++ /dev/null @@ -1,273 +0,0 @@ -package terraform - -import ( - "fmt" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// newShadowComponentFactory creates a shadowed contextComponentFactory -// so that requests to create new components result in both a real and -// shadow side. -func newShadowComponentFactory( - f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) { - // Create the shared data - shared := &shadowComponentFactoryShared{contextComponentFactory: f} - - // Create the real side - real := &shadowComponentFactory{ - shadowComponentFactoryShared: shared, - } - - // Create the shadow - shadow := &shadowComponentFactory{ - shadowComponentFactoryShared: shared, - Shadow: true, - } - - return real, shadow -} - -// shadowComponentFactory is the shadow side. 
Any components created -// with this factory are fake and will not cause real work to happen. -// -// Unlike other shadowers, the shadow component factory will allow the -// shadow to create _any_ component even if it is never requested on the -// real side. This is because errors will happen later downstream as function -// calls are made to the shadows that are never matched on the real side. -type shadowComponentFactory struct { - *shadowComponentFactoryShared - - Shadow bool // True if this should return the shadow - lock sync.Mutex -} - -func (f *shadowComponentFactory) ResourceProvider( - n, uid string) (ResourceProvider, error) { - f.lock.Lock() - defer f.lock.Unlock() - - real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid) - var result ResourceProvider = real - if f.Shadow { - result = shadow - } - - return result, err -} - -func (f *shadowComponentFactory) ResourceProvisioner( - n, uid string) (ResourceProvisioner, error) { - f.lock.Lock() - defer f.lock.Unlock() - - real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid) - var result ResourceProvisioner = real - if f.Shadow { - result = shadow - } - - return result, err -} - -// CloseShadow is called when the _real_ side is complete. This will cause -// all future blocking operations to return immediately on the shadow to -// ensure the shadow also completes. -func (f *shadowComponentFactory) CloseShadow() error { - // If we aren't the shadow, just return - if !f.Shadow { - return nil - } - - // Lock ourselves so we don't modify state - f.lock.Lock() - defer f.lock.Unlock() - - // Grab our shared state - shared := f.shadowComponentFactoryShared - - // If we're already closed, its an error - if shared.closed { - return fmt.Errorf("component factory shadow already closed") - } - - // Close all the providers and provisioners and return the error - var result error - for _, n := range shared.providerKeys { - _, shadow, err := shared.ResourceProvider(n, n) - if err == nil && shadow != nil { - if err := shadow.CloseShadow(); err != nil { - result = multierror.Append(result, err) - } - } - } - - for _, n := range shared.provisionerKeys { - _, shadow, err := shared.ResourceProvisioner(n, n) - if err == nil && shadow != nil { - if err := shadow.CloseShadow(); err != nil { - result = multierror.Append(result, err) - } - } - } - - // Mark ourselves as closed - shared.closed = true - - return result -} - -func (f *shadowComponentFactory) ShadowError() error { - // If we aren't the shadow, just return - if !f.Shadow { - return nil - } - - // Lock ourselves so we don't modify state - f.lock.Lock() - defer f.lock.Unlock() - - // Grab our shared state - shared := f.shadowComponentFactoryShared - - // If we're not closed, its an error - if !shared.closed { - return fmt.Errorf("component factory must be closed to retrieve errors") - } - - // Close all the providers and provisioners and return the error - var result error - for _, n := range shared.providerKeys { - _, shadow, err := shared.ResourceProvider(n, n) - if err == nil && shadow != nil { - if err := shadow.ShadowError(); err != nil { - result = multierror.Append(result, err) - } - } - } - - for _, n := range shared.provisionerKeys { - _, shadow, err := shared.ResourceProvisioner(n, n) - if err == nil && shadow != nil { - if err := shadow.ShadowError(); err != nil { - result = multierror.Append(result, err) - } - } - } - - return result -} - -// shadowComponentFactoryShared is shared data between the two factories. 
-// -// It is NOT SAFE to run any function on this struct in parallel. Lock -// access to this struct. -type shadowComponentFactoryShared struct { - contextComponentFactory - - closed bool - providers shadow.KeyedValue - providerKeys []string - provisioners shadow.KeyedValue - provisionerKeys []string -} - -// shadowResourceProviderFactoryEntry is the entry that is stored in -// the Shadows key/value for a provider. -type shadowComponentFactoryProviderEntry struct { - Real ResourceProvider - Shadow shadowResourceProvider - Err error -} - -type shadowComponentFactoryProvisionerEntry struct { - Real ResourceProvisioner - Shadow shadowResourceProvisioner - Err error -} - -func (f *shadowComponentFactoryShared) ResourceProvider( - n, uid string) (ResourceProvider, shadowResourceProvider, error) { - // Determine if we already have a value - raw, ok := f.providers.ValueOk(uid) - if !ok { - // Build the entry - var entry shadowComponentFactoryProviderEntry - - // No value, initialize. Create the original - p, err := f.contextComponentFactory.ResourceProvider(n, uid) - if err != nil { - entry.Err = err - p = nil // Just to be sure - } - - if p != nil { - // Create the shadow - real, shadow := newShadowResourceProvider(p) - entry.Real = real - entry.Shadow = shadow - - if f.closed { - shadow.CloseShadow() - } - } - - // Store the value - f.providers.SetValue(uid, &entry) - f.providerKeys = append(f.providerKeys, uid) - raw = &entry - } - - // Read the entry - entry, ok := raw.(*shadowComponentFactoryProviderEntry) - if !ok { - return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw) - } - - // Return - return entry.Real, entry.Shadow, entry.Err -} - -func (f *shadowComponentFactoryShared) ResourceProvisioner( - n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) { - // Determine if we already have a value - raw, ok := f.provisioners.ValueOk(uid) - if !ok { - // Build the entry - var entry shadowComponentFactoryProvisionerEntry - - // No value, initialize. Create the original - p, err := f.contextComponentFactory.ResourceProvisioner(n, uid) - if err != nil { - entry.Err = err - p = nil // Just to be sure - } - - if p != nil { - // For now, just create a mock since we don't support provisioners yet - real, shadow := newShadowResourceProvisioner(p) - entry.Real = real - entry.Shadow = shadow - - if f.closed { - shadow.CloseShadow() - } - } - - // Store the value - f.provisioners.SetValue(uid, &entry) - f.provisionerKeys = append(f.provisionerKeys, uid) - raw = &entry - } - - // Read the entry - entry, ok := raw.(*shadowComponentFactoryProvisionerEntry) - if !ok { - return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw) - } - - // Return - return entry.Real, entry.Shadow, entry.Err -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go deleted file mode 100644 index 5588af252c4..00000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go +++ /dev/null @@ -1,158 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/copystructure" -) - -// newShadowContext creates a new context that will shadow the given context -// when walking the graph. The resulting context should be used _only once_ -// for a graph walk. -// -// The returned Shadow should be closed after the graph walk with the -// real context is complete. Errors from the shadow can be retrieved there. 
-// -// Most importantly, any operations done on the shadow context (the returned -// context) will NEVER affect the real context. All structures are deep -// copied, no real providers or resources are used, etc. -func newShadowContext(c *Context) (*Context, *Context, Shadow) { - // Copy the targets - targetRaw, err := copystructure.Copy(c.targets) - if err != nil { - panic(err) - } - - // Copy the variables - varRaw, err := copystructure.Copy(c.variables) - if err != nil { - panic(err) - } - - // Copy the provider inputs - providerInputRaw, err := copystructure.Copy(c.providerInputConfig) - if err != nil { - panic(err) - } - - // The factories - componentsReal, componentsShadow := newShadowComponentFactory(c.components) - - // Create the shadow - shadow := &Context{ - components: componentsShadow, - destroy: c.destroy, - diff: c.diff.DeepCopy(), - hooks: nil, - meta: c.meta, - module: c.module, - state: c.state.DeepCopy(), - targets: targetRaw.([]string), - variables: varRaw.(map[string]interface{}), - - // NOTE(mitchellh): This is not going to work for shadows that are - // testing that input results in the proper end state. At the time - // of writing, input is not used in any state-changing graph - // walks anyways, so this checks nothing. We set it to this to avoid - // any panics but even a "nil" value worked here. - uiInput: new(MockUIInput), - - // Hardcoded to 4 since parallelism in the shadow doesn't matter - // a ton since we're doing far less compared to the real side - // and our operations are MUCH faster. - parallelSem: NewSemaphore(4), - providerInputConfig: providerInputRaw.(map[string]map[string]interface{}), - } - - // Create the real context. This is effectively just a copy of - // the context given except we need to modify some of the values - // to point to the real side of a shadow so the shadow can compare values. - real := &Context{ - // The fields below are changed. - components: componentsReal, - - // The fields below are direct copies - destroy: c.destroy, - diff: c.diff, - // diffLock - no copy - hooks: c.hooks, - meta: c.meta, - module: c.module, - sh: c.sh, - state: c.state, - // stateLock - no copy - targets: c.targets, - uiInput: c.uiInput, - variables: c.variables, - - // l - no copy - parallelSem: c.parallelSem, - providerInputConfig: c.providerInputConfig, - runContext: c.runContext, - runContextCancel: c.runContextCancel, - shadowErr: c.shadowErr, - } - - return real, shadow, &shadowContextCloser{ - Components: componentsShadow, - } -} - -// shadowContextVerify takes the real and shadow context and verifies they -// have equal diffs and states. -func shadowContextVerify(real, shadow *Context) error { - var result error - - // The states compared must be pruned so they're minimal/clean - real.state.prune() - shadow.state.prune() - - // Compare the states - if !real.state.Equal(shadow.state) { - result = multierror.Append(result, fmt.Errorf( - "Real and shadow states do not match! "+ - "Real state:\n\n%s\n\n"+ - "Shadow state:\n\n%s\n\n", - real.state, shadow.state)) - } - - // Compare the diffs - if !real.diff.Equal(shadow.diff) { - result = multierror.Append(result, fmt.Errorf( - "Real and shadow diffs do not match! "+ - "Real diff:\n\n%s\n\n"+ - "Shadow diff:\n\n%s\n\n", - real.diff, shadow.diff)) - } - - return result -} - -// shadowContextCloser is the io.Closer returned by newShadowContext that -// closes all the shadows and returns the results. 
-type shadowContextCloser struct {
-	Components *shadowComponentFactory
-}
-
-// Close closes the shadow context.
-func (c *shadowContextCloser) CloseShadow() error {
-	return c.Components.CloseShadow()
-}
-
-func (c *shadowContextCloser) ShadowError() error {
-	err := c.Components.ShadowError()
-	if err == nil {
-		return nil
-	}
-
-	// This is a sad edge case: if the configuration contains uuid() at
-	// any point, we cannot reason about the shadow execution. Tested
-	// with Context2Plan_shadowUuid.
-	if strings.Contains(err.Error(), "uuid()") {
-		err = nil
-	}
-
-	return err
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
deleted file mode 100644
index 9741d7e7964..00000000000
--- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
+++ /dev/null
@@ -1,815 +0,0 @@
-package terraform
-
-import (
-	"fmt"
-	"log"
-	"sync"
-
-	"github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/terraform/helper/shadow"
-)
-
-// shadowResourceProvider implements ResourceProvider for the shadow
-// eval context defined in eval_context_shadow.go.
-//
-// This is used to verify behavior with a real provider. This shouldn't
-// be used directly.
-type shadowResourceProvider interface {
-	ResourceProvider
-	Shadow
-}
-
-// newShadowResourceProvider creates a new shadowed ResourceProvider.
-//
-// This will assume a well behaved real ResourceProvider. For example,
-// it assumes that the `Resources` call underneath doesn't change values
-// since once it is called on the real provider, it will be cached and
-// returned in the shadow since number of calls to that shouldn't affect
-// actual behavior.
-//
-// However, with calls like Apply, call order is taken into account,
-// parameters are checked for equality, etc.
-func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
-	// Create the shared data
-	shared := shadowResourceProviderShared{}
-
-	// Create the real provider that does actual work
-	real := &shadowResourceProviderReal{
-		ResourceProvider: p,
-		Shared:           &shared,
-	}
-
-	// Create the shadow that watches the real value
-	shadow := &shadowResourceProviderShadow{
-		Shared: &shared,
-
-		resources:   p.Resources(),
-		dataSources: p.DataSources(),
-	}
-
-	return real, shadow
-}
-
-// shadowResourceProviderReal is the real resource provider. Function calls
-// to this will perform real work. This records the parameters and return
-// values and call order for the shadow to reproduce.
-type shadowResourceProviderReal struct {
-	ResourceProvider
-
-	Shared *shadowResourceProviderShared
-}
-
-func (p *shadowResourceProviderReal) Close() error {
-	var result error
-	if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
-		result = c.Close()
-	}
-
-	p.Shared.CloseErr.SetValue(result)
-	return result
-}
-
-func (p *shadowResourceProviderReal) Input(
-	input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
-	cCopy := c.DeepCopy()
-
-	result, err := p.ResourceProvider.Input(input, c)
-	p.Shared.Input.SetValue(&shadowResourceProviderInput{
-		Config:    cCopy,
-		Result:    result.DeepCopy(),
-		ResultErr: err,
-	})
-
-	return result, err
-}
-
-func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
-	warns, errs := p.ResourceProvider.Validate(c)
-	p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
-		Config:     c.DeepCopy(),
-		ResultWarn: warns,
-		ResultErr:  errs,
-	})
-
-	return warns, errs
-}
-
-func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
-	cCopy := c.DeepCopy()
-
-	err := p.ResourceProvider.Configure(c)
-	p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
-		Config: cCopy,
-		Result: err,
-	})
-
-	return err
-}
-
-func (p *shadowResourceProviderReal) Stop() error {
-	return p.ResourceProvider.Stop()
-}
-
-func (p *shadowResourceProviderReal) ValidateResource(
-	t string, c *ResourceConfig) ([]string, []error) {
-	key := t
-	configCopy := c.DeepCopy()
-
-	// Real operation
-	warns, errs := p.ResourceProvider.ValidateResource(t, c)
-
-	// Initialize to ensure we always have a wrapper with a lock
-	p.Shared.ValidateResource.Init(
-		key, &shadowResourceProviderValidateResourceWrapper{})
-
-	// Get the result
-	raw := p.Shared.ValidateResource.Value(key)
-	wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
-	if !ok {
-		// If this fails then we just continue with our day... the shadow
-		// will fail too but there isn't much we can do.
-		log.Printf(
-			"[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
-		return warns, errs
-	}
-
-	// Lock the wrapper for writing and record our call
-	wrapper.Lock()
-	defer wrapper.Unlock()
-
-	wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
-		Config: configCopy,
-		Warns:  warns,
-		Errors: errs,
-	})
-
-	// With it locked, call SetValue again so that it triggers WaitForChange
-	p.Shared.ValidateResource.SetValue(key, wrapper)
-
-	// Return the result
-	return warns, errs
-}
-
-func (p *shadowResourceProviderReal) Apply(
-	info *InstanceInfo,
-	state *InstanceState,
-	diff *InstanceDiff) (*InstanceState, error) {
-	// These have to be copied before the call since call can modify
-	stateCopy := state.DeepCopy()
-	diffCopy := diff.DeepCopy()
-
-	result, err := p.ResourceProvider.Apply(info, state, diff)
-	p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
-		State:     stateCopy,
-		Diff:      diffCopy,
-		Result:    result.DeepCopy(),
-		ResultErr: err,
-	})
-
-	return result, err
-}
-
-func (p *shadowResourceProviderReal) Diff(
-	info *InstanceInfo,
-	state *InstanceState,
-	desired *ResourceConfig) (*InstanceDiff, error) {
-	// These have to be copied before the call since call can modify
-	stateCopy := state.DeepCopy()
-	desiredCopy := desired.DeepCopy()
-
-	result, err := p.ResourceProvider.Diff(info, state, desired)
-	p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
-		State:     stateCopy,
-		Desired:   desiredCopy,
-		Result:    result.DeepCopy(),
-		ResultErr: err,
-	})
-
-	return result, err
-}
-
-func (p *shadowResourceProviderReal) Refresh(
-	info *InstanceInfo,
-	state *InstanceState) (*InstanceState, error) {
-	// These have to be copied before the call since call can modify
-	stateCopy := state.DeepCopy()
-
-	result, err := p.ResourceProvider.Refresh(info, state)
-	p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
-		State:     stateCopy,
-		Result:    result.DeepCopy(),
-		ResultErr: err,
-	})
-
-	return result, err
-}
-
-func (p *shadowResourceProviderReal) ValidateDataSource(
-	t string, c *ResourceConfig) ([]string, []error) {
-	key := t
-	configCopy := c.DeepCopy()
-
-	// Real operation
-	warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
-
-	// Initialize
-	p.Shared.ValidateDataSource.Init(
-		key, &shadowResourceProviderValidateDataSourceWrapper{})
-
-	// Get the result
-	raw := p.Shared.ValidateDataSource.Value(key)
-	wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
-	if !ok {
-		// If this fails then we just continue with our day... the shadow
-		// will fail too but there isn't much we can do.
-		log.Printf(
-			"[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
-		return warns, errs
-	}
-
-	// Lock the wrapper for writing and record our call
-	wrapper.Lock()
-	defer wrapper.Unlock()
-
-	wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
-		Config: configCopy,
-		Warns:  warns,
-		Errors: errs,
-	})
-
-	// Set it
-	p.Shared.ValidateDataSource.SetValue(key, wrapper)
-
-	// Return the result
-	return warns, errs
-}
-
-func (p *shadowResourceProviderReal) ReadDataDiff(
-	info *InstanceInfo,
-	desired *ResourceConfig) (*InstanceDiff, error) {
-	// These have to be copied before the call since call can modify
-	desiredCopy := desired.DeepCopy()
-
-	result, err := p.ResourceProvider.ReadDataDiff(info, desired)
-	p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
-		Desired:   desiredCopy,
-		Result:    result.DeepCopy(),
-		ResultErr: err,
-	})
-
-	return result, err
-}
-
-func (p *shadowResourceProviderReal) ReadDataApply(
-	info *InstanceInfo,
-	diff *InstanceDiff) (*InstanceState, error) {
-	// These have to be copied before the call since call can modify
-	diffCopy := diff.DeepCopy()
-
-	result, err := p.ResourceProvider.ReadDataApply(info, diff)
-	p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
-		Diff:      diffCopy,
-		Result:    result.DeepCopy(),
-		ResultErr: err,
-	})
-
-	return result, err
-}
-
-// shadowResourceProviderShadow is the shadow resource provider. Function
-// calls never affect real resources. This is paired with the "real" side
-// which must be called properly to enable recording.
-type shadowResourceProviderShadow struct {
-	Shared *shadowResourceProviderShared
-
-	// Cached values that are expected to not change
-	resources   []ResourceType
-	dataSources []DataSource
-
-	Error     error // Error is the list of errors from the shadow
-	ErrorLock sync.Mutex
-}
-
-type shadowResourceProviderShared struct {
-	// NOTE: Anytime a value is added here, be sure to add it to
-	// the Close() method so that it is closed.
- - CloseErr shadow.Value - Input shadow.Value - Validate shadow.Value - Configure shadow.Value - ValidateResource shadow.KeyedValue - Apply shadow.KeyedValue - Diff shadow.KeyedValue - Refresh shadow.KeyedValue - ValidateDataSource shadow.KeyedValue - ReadDataDiff shadow.KeyedValue - ReadDataApply shadow.KeyedValue -} - -func (p *shadowResourceProviderShared) Close() error { - return shadow.Close(p) -} - -func (p *shadowResourceProviderShadow) CloseShadow() error { - err := p.Shared.Close() - if err != nil { - err = fmt.Errorf("close error: %s", err) - } - - return err -} - -func (p *shadowResourceProviderShadow) ShadowError() error { - return p.Error -} - -func (p *shadowResourceProviderShadow) Resources() []ResourceType { - return p.resources -} - -func (p *shadowResourceProviderShadow) DataSources() []DataSource { - return p.dataSources -} - -func (p *shadowResourceProviderShadow) Close() error { - v := p.Shared.CloseErr.Value() - if v == nil { - return nil - } - - return v.(error) -} - -func (p *shadowResourceProviderShadow) Input( - input UIInput, c *ResourceConfig) (*ResourceConfig, error) { - // Get the result of the input call - raw := p.Shared.Input.Value() - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderInput) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'input' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) { - // Get the result of the validate call - raw := p.Shared.Validate.Value() - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderValidate) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'validate' shadow value: %#v", raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.ResultWarn, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error { - // Get the result of the call - raw := p.Shared.Configure.Value() - if raw == nil { - return nil - } - - result, ok := raw.(*shadowResourceProviderConfigure) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'configure' shadow value: %#v", raw)) - return nil - } - - // Compare the parameters, which should be identical - if !c.Equal(result.Config) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", - result.Config, c)) - p.ErrorLock.Unlock() - } - - // Return the results - return result.Result -} - -// Stop returns immediately. 
-func (p *shadowResourceProviderShadow) Stop() error { - return nil -} - -func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { - // Unique key - key := t - - // Get the initial value - raw := p.Shared.ValidateResource.Value(key) - - // Find a validation with our configuration - var result *shadowResourceProviderValidateResource - for { - // Get the value - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateResource' call for %q:\n\n%#v", - key, c)) - return nil, nil - } - - wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Look for the matching call with our configuration - wrapper.RLock() - for _, call := range wrapper.Calls { - if call.Config.Equal(c) { - result = call - break - } - } - wrapper.RUnlock() - - // If we found a result, exit - if result != nil { - break - } - - // Wait for a change so we can get the wrapper again - raw = p.Shared.ValidateResource.WaitForChange(key) - } - - return result.Warns, result.Errors -} - -func (p *shadowResourceProviderShadow) Apply( - info *InstanceInfo, - state *InstanceState, - diff *InstanceDiff) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Apply.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' call for %q:\n\n%#v\n\n%#v", - key, state, diff)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - - if !diff.Equal(result.Diff) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", - key, result.Diff, diff)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) Diff( - info *InstanceInfo, - state *InstanceState, - desired *ResourceConfig) (*InstanceDiff, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.Diff.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'diff' call for %q:\n\n%#v\n\n%#v", - key, state, desired)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderDiff) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'diff' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !state.Equal(result.State) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", - key, result.State, state)) - p.ErrorLock.Unlock() - } - if !desired.Equal(result.Desired) { - 
p.ErrorLock.Lock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"Diff %q had unequal desired configs (real, then shadow):\n\n%#v\n\n%#v",
-			key, result.Desired, desired))
-		p.ErrorLock.Unlock()
-	}
-
-	return result.Result, result.ResultErr
-}
-
-func (p *shadowResourceProviderShadow) Refresh(
-	info *InstanceInfo,
-	state *InstanceState) (*InstanceState, error) {
-	// Unique key
-	key := info.uniqueId()
-	raw := p.Shared.Refresh.Value(key)
-	if raw == nil {
-		p.ErrorLock.Lock()
-		defer p.ErrorLock.Unlock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"Unknown 'refresh' call for %q:\n\n%#v",
-			key, state))
-		return nil, nil
-	}
-
-	result, ok := raw.(*shadowResourceProviderRefresh)
-	if !ok {
-		p.ErrorLock.Lock()
-		defer p.ErrorLock.Unlock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"Unknown 'refresh' shadow value: %#v", raw))
-		return nil, nil
-	}
-
-	// Compare the parameters, which should be identical
-	if !state.Equal(result.State) {
-		p.ErrorLock.Lock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
-			key, result.State, state))
-		p.ErrorLock.Unlock()
-	}
-
-	return result.Result, result.ResultErr
-}
-
-func (p *shadowResourceProviderShadow) ValidateDataSource(
-	t string, c *ResourceConfig) ([]string, []error) {
-	// Unique key
-	key := t
-
-	// Get the initial value
-	raw := p.Shared.ValidateDataSource.Value(key)
-
-	// Find a validation with our configuration
-	var result *shadowResourceProviderValidateDataSource
-	for {
-		// Get the value
-		if raw == nil {
-			p.ErrorLock.Lock()
-			defer p.ErrorLock.Unlock()
-			p.Error = multierror.Append(p.Error, fmt.Errorf(
-				"Unknown 'ValidateDataSource' call for %q:\n\n%#v",
-				key, c))
-			return nil, nil
-		}
-
-		wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
-		if !ok {
-			p.ErrorLock.Lock()
-			defer p.ErrorLock.Unlock()
-			p.Error = multierror.Append(p.Error, fmt.Errorf(
-				"Unknown 'ValidateDataSource' shadow value: %#v", raw))
-			return nil, nil
-		}
-
-		// Look for the matching call with our configuration
-		wrapper.RLock()
-		for _, call := range wrapper.Calls {
-			if call.Config.Equal(c) {
-				result = call
-				break
-			}
-		}
-		wrapper.RUnlock()
-
-		// If we found a result, exit
-		if result != nil {
-			break
-		}
-
-		// Wait for a change so we can get the wrapper again
-		raw = p.Shared.ValidateDataSource.WaitForChange(key)
-	}
-
-	return result.Warns, result.Errors
-}
-
-func (p *shadowResourceProviderShadow) ReadDataDiff(
-	info *InstanceInfo,
-	desired *ResourceConfig) (*InstanceDiff, error) {
-	// Unique key
-	key := info.uniqueId()
-	raw := p.Shared.ReadDataDiff.Value(key)
-	if raw == nil {
-		p.ErrorLock.Lock()
-		defer p.ErrorLock.Unlock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"Unknown 'ReadDataDiff' call for %q:\n\n%#v",
-			key, desired))
-		return nil, nil
-	}
-
-	result, ok := raw.(*shadowResourceProviderReadDataDiff)
-	if !ok {
-		p.ErrorLock.Lock()
-		defer p.ErrorLock.Unlock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
-		return nil, nil
-	}
-
-	// Compare the parameters, which should be identical
-	if !desired.Equal(result.Desired) {
-		p.ErrorLock.Lock()
-		p.Error = multierror.Append(p.Error, fmt.Errorf(
-			"ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
-			key, result.Desired, desired))
-		p.ErrorLock.Unlock()
-	}
-
-	return result.Result, result.ResultErr
-}
-
-func (p *shadowResourceProviderShadow) ReadDataApply(
-	info
*InstanceInfo, - d *InstanceDiff) (*InstanceState, error) { - // Unique key - key := info.uniqueId() - raw := p.Shared.ReadDataApply.Value(key) - if raw == nil { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataApply' call for %q:\n\n%#v", - key, d)) - return nil, nil - } - - result, ok := raw.(*shadowResourceProviderReadDataApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw)) - return nil, nil - } - - // Compare the parameters, which should be identical - if !d.Equal(result.Diff) { - p.ErrorLock.Lock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", - result.Diff, d)) - p.ErrorLock.Unlock() - } - - return result.Result, result.ResultErr -} - -func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { - panic("import not supported by shadow graph") -} - -// The structs for the various function calls are put below. These structs -// are used to carry call information across the real/shadow boundaries. - -type shadowResourceProviderInput struct { - Config *ResourceConfig - Result *ResourceConfig - ResultErr error -} - -type shadowResourceProviderValidate struct { - Config *ResourceConfig - ResultWarn []string - ResultErr []error -} - -type shadowResourceProviderConfigure struct { - Config *ResourceConfig - Result error -} - -type shadowResourceProviderValidateResourceWrapper struct { - sync.RWMutex - - Calls []*shadowResourceProviderValidateResource -} - -type shadowResourceProviderValidateResource struct { - Config *ResourceConfig - Warns []string - Errors []error -} - -type shadowResourceProviderApply struct { - State *InstanceState - Diff *InstanceDiff - Result *InstanceState - ResultErr error -} - -type shadowResourceProviderDiff struct { - State *InstanceState - Desired *ResourceConfig - Result *InstanceDiff - ResultErr error -} - -type shadowResourceProviderRefresh struct { - State *InstanceState - Result *InstanceState - ResultErr error -} - -type shadowResourceProviderValidateDataSourceWrapper struct { - sync.RWMutex - - Calls []*shadowResourceProviderValidateDataSource -} - -type shadowResourceProviderValidateDataSource struct { - Config *ResourceConfig - Warns []string - Errors []error -} - -type shadowResourceProviderReadDataDiff struct { - Desired *ResourceConfig - Result *InstanceDiff - ResultErr error -} - -type shadowResourceProviderReadDataApply struct { - Diff *InstanceDiff - Result *InstanceState - ResultErr error -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go deleted file mode 100644 index 60a49088962..00000000000 --- a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go +++ /dev/null @@ -1,282 +0,0 @@ -package terraform - -import ( - "fmt" - "io" - "log" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/shadow" -) - -// shadowResourceProvisioner implements ResourceProvisioner for the shadow -// eval context defined in eval_context_shadow.go. -// -// This is used to verify behavior with a real provisioner. This shouldn't -// be used directly. 
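The real/shadow pairing deleted in these files follows one record-and-replay recipe throughout: the real side stores each call's inputs and result under a key, and the shadow side later replays the stored result while flagging any input mismatch. A rough sketch of that idea, with the vendored helper/shadow machinery reduced to a mutex-guarded map (all names here are illustrative, not Terraform's API):

```go
package main

import (
	"fmt"
	"sync"
)

// call captures the inputs and result of one real-side invocation.
type call struct {
	Input  string
	Result error
}

type recorder struct {
	mu    sync.Mutex
	calls map[string]call
}

// record is what the "real" side does after performing actual work.
func (r *recorder) record(key, input string, result error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.calls[key] = call{Input: input, Result: result}
}

// replay is what the "shadow" side does: return the recorded result, or
// an error if it was invoked with different inputs than the real side.
func (r *recorder) replay(key, input string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	c, ok := r.calls[key]
	if !ok {
		return fmt.Errorf("no recorded call for %q", key)
	}
	if c.Input != input {
		return fmt.Errorf("shadow input mismatch for %q: %q vs %q", key, c.Input, input)
	}
	return c.Result
}

func main() {
	r := &recorder{calls: map[string]call{}}
	r.record("aws_instance.web", "cfg-v1", nil)
	fmt.Println(r.replay("aws_instance.web", "cfg-v1")) // <nil>
	fmt.Println(r.replay("aws_instance.web", "cfg-v2")) // mismatch error
}
```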
-type shadowResourceProvisioner interface {
-	ResourceProvisioner
-	Shadow
-}
-
-// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
-func newShadowResourceProvisioner(
-	p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
-	// Create the shared data
-	shared := shadowResourceProvisionerShared{
-		Validate: shadow.ComparedValue{
-			Func: shadowResourceProvisionerValidateCompare,
-		},
-	}
-
-	// Create the real provisioner that does actual work
-	real := &shadowResourceProvisionerReal{
-		ResourceProvisioner: p,
-		Shared:              &shared,
-	}
-
-	// Create the shadow that watches the real value
-	shadow := &shadowResourceProvisionerShadow{
-		Shared: &shared,
-	}
-
-	return real, shadow
-}
-
-// shadowResourceProvisionerReal is the real resource provisioner. Function calls
-// to this will perform real work. This records the parameters and return
-// values and call order for the shadow to reproduce.
-type shadowResourceProvisionerReal struct {
-	ResourceProvisioner
-
-	Shared *shadowResourceProvisionerShared
-}
-
-func (p *shadowResourceProvisionerReal) Close() error {
-	var result error
-	if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
-		result = c.Close()
-	}
-
-	p.Shared.CloseErr.SetValue(result)
-	return result
-}
-
-func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
-	warns, errs := p.ResourceProvisioner.Validate(c)
-	p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
-		Config:     c,
-		ResultWarn: warns,
-		ResultErr:  errs,
-	})
-
-	return warns, errs
-}
-
-func (p *shadowResourceProvisionerReal) Apply(
-	output UIOutput, s *InstanceState, c *ResourceConfig) error {
-	err := p.ResourceProvisioner.Apply(output, s, c)
-
-	// Write the result, grab a lock for writing. This should never
-	// block long since the operations below don't block.
-	p.Shared.ApplyLock.Lock()
-	defer p.Shared.ApplyLock.Unlock()
-
-	key := s.ID
-	raw, ok := p.Shared.Apply.ValueOk(key)
-	if !ok {
-		// Setup a new value
-		raw = &shadow.ComparedValue{
-			Func: shadowResourceProvisionerApplyCompare,
-		}
-
-		// Set it
-		p.Shared.Apply.SetValue(key, raw)
-	}
-
-	compareVal, ok := raw.(*shadow.ComparedValue)
-	if !ok {
-		// Just log and return so that we don't cause the real side
-		// any side effects.
-		log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
-		return err
-	}
-
-	// Write the resulting value
-	compareVal.SetValue(&shadowResourceProvisionerApply{
-		Config:    c,
-		ResultErr: err,
-	})
-
-	return err
-}
-
-func (p *shadowResourceProvisionerReal) Stop() error {
-	return p.ResourceProvisioner.Stop()
-}
-
-// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
-// calls never affect real resources. This is paired with the "real" side
-// which must be called properly to enable recording.
-type shadowResourceProvisionerShadow struct {
-	Shared *shadowResourceProvisionerShared
-
-	Error     error // Error is the list of errors from the shadow
-	ErrorLock sync.Mutex
-}
-
-type shadowResourceProvisionerShared struct {
-	// NOTE: Anytime a value is added here, be sure to add it to
-	// the Close() method so that it is closed.
- - CloseErr shadow.Value - Validate shadow.ComparedValue - Apply shadow.KeyedValue - ApplyLock sync.Mutex // For writing only -} - -func (p *shadowResourceProvisionerShared) Close() error { - closers := []io.Closer{ - &p.CloseErr, - } - - for _, c := range closers { - // This should never happen, but we don't panic because a panic - // could affect the real behavior of Terraform and a shadow should - // never be able to do that. - if err := c.Close(); err != nil { - return err - } - } - - return nil -} - -func (p *shadowResourceProvisionerShadow) CloseShadow() error { - err := p.Shared.Close() - if err != nil { - err = fmt.Errorf("close error: %s", err) - } - - return err -} - -func (p *shadowResourceProvisionerShadow) ShadowError() error { - return p.Error -} - -func (p *shadowResourceProvisionerShadow) Close() error { - v := p.Shared.CloseErr.Value() - if v == nil { - return nil - } - - return v.(error) -} - -func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) { - // Get the result of the validate call - raw := p.Shared.Validate.Value(c) - if raw == nil { - return nil, nil - } - - result, ok := raw.(*shadowResourceProvisionerValidate) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'validate' shadow value: %#v", raw)) - return nil, nil - } - - // We don't need to compare configurations because we key on the - // configuration so just return right away. - return result.ResultWarn, result.ResultErr -} - -func (p *shadowResourceProvisionerShadow) Apply( - output UIOutput, s *InstanceState, c *ResourceConfig) error { - // Get the value based on the key - key := s.ID - raw := p.Shared.Apply.Value(key) - if raw == nil { - return nil - } - - compareVal, ok := raw.(*shadow.ComparedValue) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value: %#v", raw)) - return nil - } - - // With the compared value, we compare against our config - raw = compareVal.Value(c) - if raw == nil { - return nil - } - - result, ok := raw.(*shadowResourceProvisionerApply) - if !ok { - p.ErrorLock.Lock() - defer p.ErrorLock.Unlock() - p.Error = multierror.Append(p.Error, fmt.Errorf( - "Unknown 'apply' shadow value: %#v", raw)) - return nil - } - - return result.ResultErr -} - -func (p *shadowResourceProvisionerShadow) Stop() error { - // For the shadow, we always just return nil since a Stop indicates - // that we were interrupted and shadows are disabled during interrupts - // anyways. - return nil -} - -// The structs for the various function calls are put below. These structs -// are used to carry call information across the real/shadow boundaries. 
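The compare functions defined just below match a recorded call by its configuration rather than by call order. A stripped-down sketch of that lookup, with `shadow.ComparedValue` reduced to a plain slice scan (the real helper also blocks until a matching value arrives; all names here are illustrative):

```go
package main

import "fmt"

// validateCall stands in for a recorded Validate invocation; Config is a
// placeholder for *ResourceConfig.
type validateCall struct {
	Config string
	Warns  []string
}

// findByConfig mimics a ComparedValue lookup: the key is the config
// itself, and a compare function decides whether a recorded call matches.
func findByConfig(calls []validateCall, config string) *validateCall {
	for i := range calls {
		if calls[i].Config == config {
			return &calls[i]
		}
	}
	return nil
}

func main() {
	calls := []validateCall{{Config: "a"}, {Config: "b", Warns: []string{"deprecated"}}}
	if c := findByConfig(calls, "b"); c != nil {
		fmt.Println(c.Warns) // [deprecated]
	}
}
```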
-
-type shadowResourceProvisionerValidate struct {
-	Config     *ResourceConfig
-	ResultWarn []string
-	ResultErr  []error
-}
-
-type shadowResourceProvisionerApply struct {
-	Config    *ResourceConfig
-	ResultErr error
-}
-
-func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
-	c, ok := k.(*ResourceConfig)
-	if !ok {
-		return false
-	}
-
-	result, ok := v.(*shadowResourceProvisionerValidate)
-	if !ok {
-		return false
-	}
-
-	return c.Equal(result.Config)
-}
-
-func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
-	c, ok := k.(*ResourceConfig)
-	if !ok {
-		return false
-	}
-
-	result, ok := v.(*shadowResourceProvisionerApply)
-	if !ok {
-		return false
-	}
-
-	return c.Equal(result.Config)
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
index 0c46194d6fb..89a404847de 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -20,6 +20,8 @@ import (
 	"github.com/hashicorp/terraform/config"
 	"github.com/mitchellh/copystructure"
 	"github.com/satori/go.uuid"
+
+	tfversion "github.com/hashicorp/terraform/version"
 )
 
 const (
@@ -664,7 +666,7 @@ func (s *State) FromFutureTerraform() bool {
 	}
 
 	v := version.Must(version.NewVersion(s.TFVersion))
-	return SemVersion.LessThan(v)
+	return tfversion.SemVer.LessThan(v)
 }
 
 func (s *State) Init() {
@@ -977,6 +979,10 @@ type ModuleState struct {
 	// always disjoint, so the path represents a module tree
 	Path []string `json:"path"`
 
+	// Locals are kept only transiently in-memory, because we can always
+	// re-compute them.
+	Locals map[string]interface{} `json:"-"`
+
 	// Outputs declared by the module and maintained for each module
 	// even though only the root module technically needs to be kept.
 	// This allows operators to inspect values at the boundaries.
@@ -1681,7 +1687,20 @@ func (s *InstanceState) Equal(other *InstanceState) bool {
 		// We only do the deep check if both are non-nil. If one is nil
 		// we treat it as equal since their lengths are both zero (check
 		// above).
-		if !reflect.DeepEqual(s.Meta, other.Meta) {
+		//
+		// Since this can contain numeric values that may change types during
+		// serialization, let's compare the serialized values.
+ sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { return false } } @@ -1891,7 +1910,7 @@ func ReadState(src io.Reader) (*State, error) { result = v3State default: return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), versionIdentifier.Version) + tfversion.SemVer.String(), versionIdentifier.Version) } // If we reached this place we must have a result set @@ -1935,7 +1954,7 @@ func ReadStateV2(jsonBytes []byte) (*State, error) { // version that we don't understand if state.Version > StateVersion { return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), state.Version) + tfversion.SemVer.String(), state.Version) } // Make sure the version is semantic @@ -1970,7 +1989,7 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { // version that we don't understand if state.Version > StateVersion { return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), state.Version) + tfversion.SemVer.String(), state.Version) } // Make sure the version is semantic diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go index 22be1ab62a4..d9bd362f2f6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go @@ -119,6 +119,9 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { return &NodeApplyableProvider{NodeAbstractProvider: a} } steps := []GraphTransformer{ + // Add the local values + &LocalTransformer{Module: t.Module}, + // Add outputs and metadata &OutputTransformer{Module: t.Module}, &AttachResourceConfigTransformer{Module: t.Module}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go new file mode 100644 index 00000000000..95ecfc0a440 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. 
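Returning to the `InstanceState.Equal` hunk above: comparing `Meta` maps with `reflect.DeepEqual` breaks after a state load because JSON unmarshaling turns every number into `float64`, whereas comparing the serialized bytes is stable across that round trip. A standalone, standard-library-only illustration of the difference:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	// An int-valued Meta entry...
	a := map[string]interface{}{"schema_version": 1}

	// ...comes back as float64 after a JSON round trip (a state load).
	raw, _ := json.Marshal(a)
	var b map[string]interface{}
	_ = json.Unmarshal(raw, &b)

	fmt.Println(reflect.DeepEqual(a, b)) // false: int vs float64

	// Comparing serialized forms sidesteps the numeric type change.
	aj, _ := json.Marshal(a)
	bj, _ := json.Marshal(b)
	fmt.Println(bytes.Equal(aj, bj)) // true
}
```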
+type LocalTransformer struct {
+	Module *module.Tree
+}
+
+func (t *LocalTransformer) Transform(g *Graph) error {
+	return t.transformModule(g, t.Module)
+}
+
+func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error {
+	if m == nil {
+		// Can't have any locals if there's no config
+		return nil
+	}
+
+	for _, local := range m.Config().Locals {
+		node := &NodeLocal{
+			PathValue: normalizeModulePath(m.Path()),
+			Config:    local,
+		}
+
+		g.Add(node)
+	}
+
+	// Also populate locals for child modules
+	for _, c := range m.Children() {
+		if err := t.transformModule(g, c); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
index c5452354d4b..24ddc7a0a44 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -76,6 +76,37 @@ func (t *ReferenceTransformer) Transform(g *Graph) error {
 	return nil
 }
 
+// DestroyValueReferenceTransformer is a GraphTransformer that reverses the
+// edges for nodes that depend on an Output or Local value. Output and local
+// nodes are removed during destroy, so anything which depends on them must
+// be evaluated first. These can't be interpolated during destroy, so the
+// stored value must be used anyway, hence they don't need to be
+// re-evaluated.
+type DestroyValueReferenceTransformer struct{}
+
+func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
+	vs := g.Vertices()
+
+	for _, v := range vs {
+		switch v.(type) {
+		case *NodeApplyableOutput, *NodeLocal:
+			// OK
+		default:
+			continue
+		}
+
+		// reverse any incoming edges so that the value is removed last
+		for _, e := range g.EdgesTo(v) {
+			source := e.Source()
+			log.Printf("[TRACE] output dep: %s", dag.VertexName(source))
+
+			g.RemoveEdge(e)
+			g.Connect(&DestroyEdge{S: v, T: source})
+		}
+	}
+
+	return nil
+}
+
 // ReferenceMap is a structure that can be used to efficiently check
 // for references on a graph.
 type ReferenceMap struct {
@@ -296,6 +327,8 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
 		return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
 	case *config.UserVariable:
 		return []string{fmt.Sprintf("var.%s", v.Name)}
+	case *config.LocalVariable:
+		return []string{fmt.Sprintf("local.%s", v.Name)}
 	default:
 		return nil
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
index 4f117b4f732..0cfcb0ac0d6 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -73,9 +73,11 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
 		if _, ok := v.(GraphNodeResource); ok {
 			removable = true
 		}
+
 		if vr, ok := v.(RemovableIfNotTargeted); ok {
 			removable = vr.RemoveIfNotTargeted()
 		}
+
 		if removable && !targetedNodes.Include(v) {
 			log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
 			g.Remove(v)
@@ -135,7 +137,10 @@ func (t *TargetsTransformer) selectTargetedNodes(
 			}
 		}
 	}
+	return t.addDependencies(targetedNodes, g)
+}
+func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) {
 	// Handle nodes that need to be included if their dependencies are included.
// This requires multiple passes since we need to catch transitive // dependencies if and only if they are via other nodes that also @@ -157,11 +162,6 @@ func (t *TargetsTransformer) selectTargetedNodes( } dependers = dependers.Filter(func(dv interface{}) bool { - // Can ignore nodes that are already targeted - /*if targetedNodes.Include(dv) { - return false - }*/ - _, ok := dv.(GraphNodeTargetDownstream) return ok }) @@ -180,6 +180,7 @@ func (t *TargetsTransformer) selectTargetedNodes( // depending on in case that informs its decision about whether // it is safe to be targeted. deps := g.DownEdges(v) + depsTargeted := deps.Intersection(targetedNodes) depsUntargeted := deps.Difference(depsTargeted) @@ -193,7 +194,44 @@ func (t *TargetsTransformer) selectTargetedNodes( } } - return targetedNodes, nil + return targetedNodes.Filter(func(dv interface{}) bool { + return filterPartialOutputs(dv, targetedNodes, g) + }), nil +} + +// Outputs may have been included transitively, but if any of their +// dependencies have been pruned they won't be resolvable. +// If nothing depends on the output, and the output is missing any +// dependencies, remove it from the graph. +// This essentially maintains the previous behavior where interpolation in +// outputs would fail silently, but can now surface errors where the output +// is required. +func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { + // should this just be done with TargetDownstream? + if _, ok := v.(*NodeApplyableOutput); !ok { + return true + } + + dependers := g.UpEdges(v) + for _, d := range dependers.List() { + if _, ok := d.(*NodeCountBoundary); ok { + continue + } + // as soon as we see a real dependency, we mark this as + // non-removable + return true + } + + depends := g.DownEdges(v) + + for _, d := range depends.List() { + if !targetedNodes.Include(d) { + log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", + dag.VertexName(v), dag.VertexName(d)) + return false + } + } + return true } func (t *TargetsTransformer) nodeIsTarget( diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go index 7852bc4237f..d828c921ca3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go @@ -1,13 +1,18 @@ package terraform +import "sync" + // MockUIOutput is an implementation of UIOutput that can be used for tests. 
type MockUIOutput struct { + sync.Mutex OutputCalled bool OutputMessage string OutputFn func(string) } func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() o.OutputCalled = true o.OutputMessage = v if o.OutputFn != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go index 700be2ae202..76bdae09988 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go +++ b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go @@ -3,6 +3,8 @@ package terraform import ( "fmt" "runtime" + + "github.com/hashicorp/terraform/version" ) // The standard Terraform User-Agent format @@ -10,5 +12,5 @@ const UserAgent = "Terraform %s (%s)" // Generate a UserAgent string func UserAgentString() string { - return fmt.Sprintf(UserAgent, VersionString(), runtime.Version()) + return fmt.Sprintf(UserAgent, version.String(), runtime.Version()) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go index d61b11ea2fe..ac730154f5e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version.go @@ -1,31 +1,10 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/version" ) -// The main version number that is being run at the moment. -const Version = "0.10.0" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -var VersionPrerelease = "dev" - -// SemVersion is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVersion = version.Must(version.NewVersion(Version)) - -// VersionHeader is the header name used to send the current terraform version -// in http requests. -const VersionHeader = "Terraform-Version" - +// TODO: update providers to use the version package directly func VersionString() string { - if VersionPrerelease != "" { - return fmt.Sprintf("%s-%s", Version, VersionPrerelease) - } - return Version + return version.String() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go index 3cbbf560854..1f430457823 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go @@ -6,19 +6,21 @@ import ( "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + + tfversion "github.com/hashicorp/terraform/version" ) -// checkRequiredVersion verifies that any version requirements specified by +// CheckRequiredVersion verifies that any version requirements specified by // the configuration are met. // // This checks the root module as well as any additional version requirements // from child modules. // // This is tested in context_test.go. 
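`CheckRequiredVersion` (below) delegates the actual constraint matching to `github.com/hashicorp/go-version`: the configuration's `required_version` string becomes a `Constraints` value that is checked against the running `SemVer`. A minimal sketch of that check in isolation, with the version and constraint strings chosen purely for illustration:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// The running version, analogous to tfversion.SemVer.
	running := version.Must(version.NewVersion("0.10.9"))

	// A required_version constraint as it might appear in configuration.
	cs, err := version.NewConstraint(">= 0.10.0, < 0.11.0")
	if err != nil {
		panic(err) // invalid constraint string
	}

	if !cs.Check(running) {
		fmt.Printf("version %s does not satisfy %s\n", running, cs)
		return
	}
	fmt.Println("version requirement met")
}
```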
-func checkRequiredVersion(m *module.Tree) error {
+func CheckRequiredVersion(m *module.Tree) error {
 	// Check any children
 	for _, c := range m.Children() {
-		if err := checkRequiredVersion(c); err != nil {
+		if err := CheckRequiredVersion(c); err != nil {
 			return err
 		}
 	}
@@ -49,7 +51,7 @@ func checkRequiredVersion(m *module.Tree) error {
 			tf.RequiredVersion, err)
 	}
 
-	if !cs.Check(SemVersion) {
+	if !cs.Check(tfversion.SemVer) {
 		return fmt.Errorf(
 			"The currently running version of Terraform doesn't meet the\n"+
 				"version requirements explicitly specified by the configuration.\n"+
@@ -62,7 +64,7 @@ func checkRequiredVersion(m *module.Tree) error {
 			" Current version: %s",
 			module,
 			tf.RequiredVersion,
-			SemVersion)
+			tfversion.SemVer)
 	}
 
 	return nil
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
new file mode 100644
index 00000000000..2c23f76ae5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
@@ -0,0 +1,26 @@
+package tfdiags
+
+type Diagnostic interface {
+	Severity() Severity
+	Description() Description
+	Source() Source
+}
+
+type Severity rune
+
+//go:generate stringer -type=Severity
+
+const (
+	Error   Severity = 'E'
+	Warning Severity = 'W'
+)
+
+type Description struct {
+	Summary string
+	Detail  string
+}
+
+type Source struct {
+	Subject *SourceRange
+	Context *SourceRange
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
new file mode 100644
index 00000000000..667ba8096dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
@@ -0,0 +1,181 @@
+package tfdiags
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Diagnostics is a list of diagnostics. Diagnostics is intended to be used
+// where a Go "error" might normally be used, allowing richer information
+// to be conveyed (more context, support for warnings).
+//
+// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
+// heap allocation to be avoided in the common case where there are no
+// diagnostics to report at all.
+type Diagnostics []Diagnostic
+
+// Append is the main interface for constructing Diagnostics lists, taking
+// an existing list (which may be nil) and appending the new objects to it
+// after normalizing them to be implementations of Diagnostic.
+//
+// The usual pattern for a function that natively "speaks" diagnostics is:
+//
+//     // Create a nil Diagnostics at the start of the function
+//     var diags tfdiags.Diagnostics
+//
+//     // At later points, build on it if errors / warnings occur:
+//     foo, err := DoSomethingRisky()
+//     if err != nil {
+//         diags = diags.Append(err)
+//     }
+//
+//     // Eventually return the result and diagnostics in place of error
+//     return result, diags
+//
+// Append accepts a variety of different diagnostic-like types, including
+// native Go errors and HCL diagnostics. It also knows how to unwrap
+// a multierror.Error into separate error diagnostics. It can be passed
+// another Diagnostics to concatenate the two lists. If given something
+// it cannot handle, this function will panic.
+func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
+	for _, item := range new {
+		if item == nil {
+			continue
+		}
+
+		switch ti := item.(type) {
+		case Diagnostic:
+			diags = append(diags, ti)
+		case Diagnostics:
+			diags = append(diags, ti...) // flatten
+		case diagnosticsAsError:
+			diags = diags.Append(ti.Diagnostics) // unwrap
+		case hcl.Diagnostics:
+			for _, hclDiag := range ti {
+				diags = append(diags, hclDiagnostic{hclDiag})
+			}
+		case *hcl.Diagnostic:
+			diags = append(diags, hclDiagnostic{ti})
+		case *multierror.Error:
+			for _, err := range ti.Errors {
+				diags = append(diags, nativeError{err})
+			}
+		case error:
+			switch {
+			case errwrap.ContainsType(ti, Diagnostics(nil)):
+				// If we have an errwrap wrapper with a Diagnostics hiding
+				// inside then we'll unpick it here to get access to the
+				// individual diagnostics.
+				diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil)))
+			case errwrap.ContainsType(ti, hcl.Diagnostics(nil)):
+				// Likewise, if we have HCL diagnostics we'll unpick that too.
+				diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil)))
+			default:
+				diags = append(diags, nativeError{ti})
+			}
+		default:
+			panic(fmt.Errorf("can't construct diagnostic(s) from %T", item))
+		}
+	}
+
+	// Given the above, we should never end up with a non-nil empty slice
+	// here, but we'll make sure of that so callers can rely on empty == nil
+	if len(diags) == 0 {
+		return nil
+	}
+
+	return diags
+}
+
+// HasErrors returns true if any of the diagnostics in the list have
+// a severity of Error.
+func (diags Diagnostics) HasErrors() bool {
+	for _, diag := range diags {
+		if diag.Severity() == Error {
+			return true
+		}
+	}
+	return false
+}
+
+// ForRPC returns a version of the receiver that has been simplified so that
+// it is friendly to RPC protocols.
+//
+// Currently this means that it can be serialized with encoding/gob and
+// subsequently re-inflated. It may later grow to include other serialization
+// formats.
+//
+// Note that this loses information about the original objects used to
+// construct the diagnostics, so e.g. the errwrap API will not work as
+// expected on an error-wrapped Diagnostics that came from ForRPC.
+func (diags Diagnostics) ForRPC() Diagnostics {
+	ret := make(Diagnostics, len(diags))
+	for i := range diags {
+		ret[i] = makeRPCFriendlyDiag(diags[i])
+	}
+	return ret
+}
+
+// Err flattens a diagnostics list into a single Go error, or to nil
+// if the diagnostics list does not include any error-level diagnostics.
+//
+// This can be used to smuggle diagnostics through an API that deals in
+// native errors, but unfortunately it will lose naked warnings (warnings
+// that aren't accompanied by at least one error) since such APIs have no
+// mechanism through which to report these.
+//
+//     return result, diags.Err()
+func (diags Diagnostics) Err() error {
+	if !diags.HasErrors() {
+		return nil
+	}
+	return diagnosticsAsError{diags}
+}
+
+type diagnosticsAsError struct {
+	Diagnostics
+}
+
+func (dae diagnosticsAsError) Error() string {
+	diags := dae.Diagnostics
+	switch {
+	case len(diags) == 0:
+		// should never happen, since we don't create this wrapper if
+		// there are no diagnostics in the list.
+ return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go new file mode 100644 index 00000000000..c427879ebc7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. 
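Taken together, the tfdiags API added in this diff is meant to be consumed in a fixed shape: accumulate with `Append`, branch on `HasErrors`, and flatten with `Err` only at boundaries that deal in plain errors. A small usage sketch built entirely from the functions shown above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

// doWork returns a result plus diagnostics instead of a bare error, so
// warnings can ride along with errors.
func doWork(fail bool) (string, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	diags = diags.Append(tfdiags.SimpleWarning("this option is deprecated"))
	if fail {
		diags = diags.Append(errors.New("something went wrong"))
	}
	return "result", diags
}

func main() {
	result, diags := doWork(true)
	if diags.HasErrors() {
		// Err flattens the error diagnostics into a single Go error;
		// naked warnings are dropped at this boundary.
		fmt.Println("error:", diags.Err())
		return
	}
	fmt.Println(result)
}
```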
+package tfdiags
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
new file mode 100644
index 00000000000..35edc3041b2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
@@ -0,0 +1,23 @@
+package tfdiags
+
+// nativeError is a Diagnostic implementation that wraps a normal Go error
+type nativeError struct {
+	err error
+}
+
+var _ Diagnostic = nativeError{}
+
+func (e nativeError) Severity() Severity {
+	return Error
+}
+
+func (e nativeError) Description() Description {
+	return Description{
+		Summary: e.err.Error(),
+	}
+}
+
+func (e nativeError) Source() Source {
+	// No source information available for a native error
+	return Source{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
new file mode 100644
index 00000000000..24851f4d004
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
@@ -0,0 +1,77 @@
+package tfdiags
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic
+type hclDiagnostic struct {
+	diag *hcl.Diagnostic
+}
+
+var _ Diagnostic = hclDiagnostic{}
+
+func (d hclDiagnostic) Severity() Severity {
+	switch d.diag.Severity {
+	case hcl.DiagWarning:
+		return Warning
+	default:
+		return Error
+	}
+}
+
+func (d hclDiagnostic) Description() Description {
+	return Description{
+		Summary: d.diag.Summary,
+		Detail:  d.diag.Detail,
+	}
+}
+
+func (d hclDiagnostic) Source() Source {
+	var ret Source
+	if d.diag.Subject != nil {
+		rng := SourceRangeFromHCL(*d.diag.Subject)
+		ret.Subject = &rng
+	}
+	if d.diag.Context != nil {
+		rng := SourceRangeFromHCL(*d.diag.Context)
+		ret.Context = &rng
+	}
+	return ret
+}
+
+// SourceRangeFromHCL constructs a SourceRange from the corresponding range
+// type within the HCL package.
+func SourceRangeFromHCL(hclRange hcl.Range) SourceRange {
+	return SourceRange{
+		Filename: hclRange.Filename,
+		Start: SourcePos{
+			Line:   hclRange.Start.Line,
+			Column: hclRange.Start.Column,
+			Byte:   hclRange.Start.Byte,
+		},
+		End: SourcePos{
+			Line:   hclRange.End.Line,
+			Column: hclRange.End.Column,
+			Byte:   hclRange.End.Byte,
+		},
+	}
+}
+
+// ToHCL constructs a HCL Range from the receiving SourceRange. This is the
+// opposite of SourceRangeFromHCL.
+func (r SourceRange) ToHCL() hcl.Range {
+	return hcl.Range{
+		Filename: r.Filename,
+		Start: hcl.Pos{
+			Line:   r.Start.Line,
+			Column: r.Start.Column,
+			Byte:   r.Start.Byte,
+		},
+		End: hcl.Pos{
+			Line:   r.End.Line,
+			Column: r.End.Column,
+			Byte:   r.End.Byte,
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
new file mode 100644
index 00000000000..6cc95cc2ed3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
@@ -0,0 +1,53 @@
+package tfdiags
+
+import (
+	"encoding/gob"
+)
+
+type rpcFriendlyDiag struct {
+	Severity_ Severity
+	Summary_  string
+	Detail_   string
+	Subject_  *SourceRange
+	Context_  *SourceRange
+}
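Because `rpcFriendlyDiag` registers itself with `encoding/gob` (see the `init` below), a `Diagnostics` list can survive a gob round trip once it has been passed through `ForRPC`. A sketch of that round trip, assuming the package exactly as shown in this diff:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics
	diags = diags.Append(errors.New("boom"))

	// ForRPC swaps each diagnostic for the gob-registered rpcFriendlyDiag,
	// so the interface values can be encoded and decoded.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(diags.ForRPC()); err != nil {
		panic(err)
	}

	var decoded tfdiags.Diagnostics
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded[0].Description().Summary) // "boom"
}
```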
+
+// makeRPCFriendlyDiag transforms a given diagnostic so that it is more
+// friendly to RPC.
+//
+// In particular, it currently returns an object that can be serialized and
+// later re-inflated using gob. This definition may grow to include other
+// serializations later.
+func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic {
+	desc := diag.Description()
+	source := diag.Source()
+	return &rpcFriendlyDiag{
+		Severity_: diag.Severity(),
+		Summary_:  desc.Summary,
+		Detail_:   desc.Detail,
+		Subject_:  source.Subject,
+		Context_:  source.Context,
+	}
+}
+
+func (d *rpcFriendlyDiag) Severity() Severity {
+	return d.Severity_
+}
+
+func (d *rpcFriendlyDiag) Description() Description {
+	return Description{
+		Summary: d.Summary_,
+		Detail:  d.Detail_,
+	}
+}
+
+func (d *rpcFriendlyDiag) Source() Source {
+	return Source{
+		Subject: d.Subject_,
+		Context: d.Context_,
+	}
+}
+
+func init() {
+	gob.Register((*rpcFriendlyDiag)(nil))
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
new file mode 100644
index 00000000000..edf9e639fdc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=Severity"; DO NOT EDIT.
+
+package tfdiags
+
+import "fmt"
+
+const (
+	_Severity_name_0 = "Error"
+	_Severity_name_1 = "Warning"
+)
+
+var (
+	_Severity_index_0 = [...]uint8{0, 5}
+	_Severity_index_1 = [...]uint8{0, 7}
+)
+
+func (i Severity) String() string {
+	switch {
+	case i == 69:
+		return _Severity_name_0
+	case i == 87:
+		return _Severity_name_1
+	default:
+		return fmt.Sprintf("Severity(%d)", i)
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
new file mode 100644
index 00000000000..fb3ac989890
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
@@ -0,0 +1,25 @@
+package tfdiags
+
+type simpleWarning string
+
+var _ Diagnostic = simpleWarning("")
+
+// SimpleWarning constructs a simple (summary-only) warning diagnostic.
+func SimpleWarning(msg string) Diagnostic {
+	return simpleWarning(msg)
+}
+
+func (e simpleWarning) Severity() Severity {
+	return Warning
+}
+
+func (e simpleWarning) Description() Description {
+	return Description{
+		Summary: string(e),
+	}
+}
+
+func (e simpleWarning) Source() Source {
+	// No source information available for a simple warning
+	return Source{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go
new file mode 100644
index 00000000000..3031168d6a4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go
@@ -0,0 +1,35 @@
+package tfdiags
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+type SourceRange struct {
+	Filename   string
+	Start, End SourcePos
+}
+
+type SourcePos struct {
+	Line, Column, Byte int
+}
+
+// StartString returns a string representation of the start of the range,
+// including the filename and the line and column numbers.
+func (r SourceRange) StartString() string {
+	filename := r.Filename
+
+	// We'll try to relative-ize our filename here so it's less verbose
+	// in the common case of being in the current working directory. If not,
+	// we'll just show the full path.
+	wd, err := os.Getwd()
+	if err == nil {
+		relFn, err := filepath.Rel(wd, filename)
+		if err == nil {
+			filename = relFn
+		}
+	}
+
+	return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column)
+}
diff --git a/vendor/github.com/hashicorp/terraform/version.go b/vendor/github.com/hashicorp/terraform/version.go
index 4959e0fe298..baefdc2d970 100644
--- a/vendor/github.com/hashicorp/terraform/version.go
+++ b/vendor/github.com/hashicorp/terraform/version.go
@@ -1,10 +1,12 @@
 package main
 
-import "github.com/hashicorp/terraform/terraform"
+import (
+	"github.com/hashicorp/terraform/version"
+)
 
 // The git commit that was compiled. This will be filled in by the compiler.
 var GitCommit string
 
-const Version = terraform.Version
+const Version = version.Version
 
-var VersionPrerelease = terraform.VersionPrerelease
+var VersionPrerelease = version.Prerelease
diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go
new file mode 100644
index 00000000000..e6ea9e74331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/version/version.go
@@ -0,0 +1,36 @@
+// The version package provides a location to set the release versions for all
+// packages to consume, without creating import cycles.
+//
+// This package should not import any other terraform packages.
+package version
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+const Version = "0.10.9"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+var Prerelease = "dev"
+
+// SemVer is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVer = version.Must(version.NewVersion(Version))
+
+// Header is the header name used to send the current terraform version
+// in http requests.
+const Header = "Terraform-Version"
+
+// String returns the complete version string, including prerelease
+func String() string {
+	if Prerelease != "" {
+		return fmt.Sprintf("%s-%s", Version, Prerelease)
+	}
+	return Version
+}
diff --git a/vendor/github.com/joyent/gocommon/LICENSE b/vendor/github.com/joyent/gocommon/LICENSE
new file mode 100644
index 00000000000..14e2f777f6c
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5.
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/joyent/gocommon/README.md b/vendor/github.com/joyent/gocommon/README.md
new file mode 100644
index 00000000000..d396b09bbd7
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/README.md
@@ -0,0 +1,98 @@
+gocommon
+========
+
+Common Go library for Joyent's Triton and Manta.
+
+[![wercker status](https://app.wercker.com/status/2f63bf7f68bfdd46b979abad19c0bee0/s/master "wercker status")](https://app.wercker.com/project/byKey/2f63bf7f68bfdd46b979abad19c0bee0)
+
+## Installation
+
+Use `go get` to install gocommon.
+```
+go get github.com/joyent/gocommon
+```
+
+## Documentation
+
+Auto-generated documentation can be found on godoc.
+
+- [github.com/joyent/gocommon](http://godoc.org/github.com/joyent/gocommon)
+- [github.com/joyent/gocommon/client](http://godoc.org/github.com/joyent/gocommon/client)
+- [github.com/joyent/gocommon/errors](http://godoc.org/github.com/joyent/gocommon/errors)
+- [github.com/joyent/gocommon/http](http://godoc.org/github.com/joyent/gocommon/http)
+- [github.com/joyent/gocommon/jpc](http://godoc.org/github.com/joyent/gocommon/jpc)
+- [github.com/joyent/gocommon/testing](http://godoc.org/github.com/joyent/gocommon/testing)
+
+
+## Contributing
+
+Report bugs and request features using [GitHub Issues](https://github.com/joyent/gocommon/issues), or contribute code via a [GitHub Pull Request](https://github.com/joyent/gocommon/pulls). Changes will be code reviewed before merging. In the near future, automated tests will be run, but in the meantime please run `go fmt` and `golint`, and test all contributions.
+
+
+## Developing
+
+This library assumes a Go development environment set up based on [How to Write Go Code](https://golang.org/doc/code.html). Your GOPATH environment variable should point at your workspace directory.
+
+You can now use `go get github.com/joyent/gocommon` to install the repository to the correct location, but if you are intending on contributing back a change you may want to consider cloning the repository via git yourself.
This way you can have a single source tree for all Joyent Go projects with each repo having two remotes -- your own fork on GitHub and the upstream origin.
+
+For example if your GOPATH is `~/src/joyent/go` and you're working on multiple repos then that directory tree might look like:
+
+```
+~/src/joyent/go/
+|_ pkg/
+|_ src/
+   |_ github.com
+      |_ joyent
+         |_ gocommon
+         |_ gomanta
+         |_ gosdc
+         |_ gosign
+```
+
+### Recommended Setup
+
+```
+$ mkdir -p ${GOPATH}/src/github.com/joyent
+$ cd ${GOPATH}/src/github.com/joyent
+$ git clone git@github.com:<username>/gocommon.git
+
+# fetch dependencies
+$ git clone git@github.com:<username>/gosign.git
+$ go get -v -t ./...
+
+# add upstream remote
+$ cd gocommon
+$ git remote add upstream git@github.com:joyent/gocommon.git
+$ git remote -v
+origin   git@github.com:<username>/gocommon.git (fetch)
+origin   git@github.com:<username>/gocommon.git (push)
+upstream git@github.com:joyent/gocommon.git (fetch)
+upstream git@github.com:joyent/gocommon.git (push)
+```
+
+### Run Tests
+
+The library needs values for the `SDC_URL`, `MANTA_URL`, `MANTA_KEY_ID` and `SDC_KEY_ID` environment variables even though the tests are run locally. You can generate a temporary key and use its fingerprint for tests without adding the key to your Triton Cloud account.
+
+```
+# create a temporary key
+ssh-keygen -b 2048 -C "Testing Key" -f /tmp/id_rsa -t rsa -P ""
+
+# set up environment
+# note: leave the -E md5 argument off on older ssh-keygen
+export KEY_ID=$(ssh-keygen -E md5 -lf /tmp/id_rsa | awk -F' ' '{print $2}' | cut -d':' -f2-)
+export SDC_KEY_ID=${KEY_ID}
+export MANTA_KEY_ID=${KEY_ID}
+export SDC_URL=https://us-east-1.api.joyent.com
+export MANTA_URL=https://us-east.manta.joyent.com
+
+cd ${GOPATH}/src/github.com/joyent/gocommon
+go test ./...
+```
+
+### Build the Library
+
+```
+cd ${GOPATH}/src/github.com/joyent/gocommon
+go build ./...
+```
diff --git a/vendor/github.com/joyent/gocommon/client/client.go b/vendor/github.com/joyent/gocommon/client/client.go
new file mode 100644
index 00000000000..b4d40fd1fed
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/client/client.go
@@ -0,0 +1,110 @@
+//
+// gocommon - Go library to interact with the JoyentCloud
+//
+//
+// Copyright (c) 2013 Joyent Inc.
+//
+// Written by Daniele Stroppa
+//
+
+package client
+
+import (
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	joyenthttp "github.com/joyent/gocommon/http"
+	"github.com/joyent/gosign/auth"
+)
+
+const (
+	// The HTTP request methods.
+	GET    = "GET"
+	POST   = "POST"
+	PUT    = "PUT"
+	DELETE = "DELETE"
+	HEAD   = "HEAD"
+	COPY   = "COPY"
+)
+
+// Client implementations send service requests to the JoyentCloud.
+type Client interface {
+	SendRequest(method, apiCall, rfc1123Date string, request *joyenthttp.RequestData, response *joyenthttp.ResponseData) (err error)
+	// MakeServiceURL prepares a full URL to a service endpoint, with optional
+	// URL parts. It uses the first endpoint it can find for the given service type.
+	MakeServiceURL(parts []string) string
+	SignURL(path string, expires time.Time) (string, error)
+}
+
+// This client sends requests without authenticating.
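+//
+// A usage sketch (hypothetical endpoint value, error handling elided; a
+// sketch only, not part of the original source):
+//
+//	creds, _ := jpc.CompleteCredentialsFromEnv("")
+//	c := NewClient(creds.SdcEndpoint.URL, "", creds, nil)
+//	signedURL, _ := c.SignURL("/mypath", time.Now().Add(time.Minute))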
+type client struct {
+	mu         sync.Mutex
+	logger     *log.Logger
+	baseURL    string
+	creds      *auth.Credentials
+	httpClient *joyenthttp.Client
+}
+
+var _ Client = (*client)(nil)
+
+func newClient(baseURL string, credentials *auth.Credentials, httpClient *joyenthttp.Client, logger *log.Logger) Client {
+	client := client{baseURL: baseURL, logger: logger, creds: credentials, httpClient: httpClient}
+	return &client
+}
+
+func NewClient(baseURL, apiVersion string, credentials *auth.Credentials, logger *log.Logger) Client {
+	sharedHttpClient := joyenthttp.New(credentials, apiVersion, logger)
+	return newClient(baseURL, credentials, sharedHttpClient, logger)
+}
+
+func (c *client) sendRequest(method, url, rfc1123Date string, request *joyenthttp.RequestData, response *joyenthttp.ResponseData) (err error) {
+	if request.ReqValue != nil || response.RespValue != nil {
+		err = c.httpClient.JsonRequest(method, url, rfc1123Date, request, response)
+	} else {
+		err = c.httpClient.BinaryRequest(method, url, rfc1123Date, request, response)
+	}
+	return
+}
+
+func (c *client) SendRequest(method, apiCall, rfc1123Date string, request *joyenthttp.RequestData, response *joyenthttp.ResponseData) (err error) {
+	url := c.MakeServiceURL([]string{c.creds.UserAuthentication.User, apiCall})
+	err = c.sendRequest(method, url, rfc1123Date, request, response)
+	return
+}
+
+func makeURL(base string, parts []string) string {
+	if !strings.HasSuffix(base, "/") && len(parts) > 0 {
+		base += "/"
+	}
+	// Guard the index so a parts slice with fewer than two elements cannot panic.
+	if len(parts) > 1 && parts[1] == "" {
+		return base + parts[0]
+	}
+	return base + strings.Join(parts, "/")
+}
+
+func (c *client) MakeServiceURL(parts []string) string {
+	return makeURL(c.baseURL, parts)
+}
+
+func (c *client) SignURL(path string, expires time.Time) (string, error) {
+	parsedURL, err := url.Parse(c.baseURL)
+	if err != nil {
+		return "", fmt.Errorf("bad Manta endpoint URL %q: %v", c.baseURL, err)
+	}
+	userAuthentication := c.creds.UserAuthentication
+	userAuthentication.Algorithm = "RSA-SHA1"
+	keyId := url.QueryEscape(fmt.Sprintf("/%s/keys/%s", userAuthentication.User, c.creds.MantaKeyId))
+	params := fmt.Sprintf("algorithm=%s&expires=%d&keyId=%s", userAuthentication.Algorithm, expires.Unix(), keyId)
+	signingLine := fmt.Sprintf("GET\n%s\n%s\n%s", parsedURL.Host, path, params)
+
+	signature, err := auth.GetSignature(userAuthentication, signingLine)
+	if err != nil {
+		return "", fmt.Errorf("cannot generate URL signature: %v", err)
+	}
+	signedURL := fmt.Sprintf("%s%s?%s&signature=%s", c.baseURL, path, params, url.QueryEscape(signature))
+	return signedURL, nil
+}
diff --git a/vendor/github.com/joyent/gocommon/errors/errors.go b/vendor/github.com/joyent/gocommon/errors/errors.go
new file mode 100644
index 00000000000..4c641cac467
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/errors/errors.go
@@ -0,0 +1,292 @@
+//
+// gocommon - Go library to interact with the JoyentCloud
+// This package provides an Error implementation which knows about types of error, and which has support
+// for error causes.
+//
+// Copyright (c) 2013 Joyent Inc.
+//
+// Written by Daniele Stroppa
+//
+
+package errors
+
+import "fmt"
+
+type Code string
+
+const (
+	// Publicly available error types.
+	// These errors are provided because they are specifically required by business logic in the callers.
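+	// As a usage sketch (hypothetical err value, not from the source):
+	// errors.IsNotAuthorized(err) reports whether err or its cause carries
+	// the NotAuthorized code; one Is* predicate is defined below for each
+	// code in this block.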
+	BadRequestError         = Code("BadRequest")
+	InternalErrorError      = Code("InternalError")
+	InvalidArgumentError    = Code("InvalidArgument")
+	InvalidCredentialsError = Code("InvalidCredentials")
+	InvalidHeaderError      = Code("InvalidHeader")
+	InvalidVersionError     = Code("InvalidVersion")
+	MissingParameterError   = Code("MissingParameter")
+	NotAuthorizedError      = Code("NotAuthorized")
+	RequestThrottledError   = Code("RequestThrottled")
+	RequestTooLargeError    = Code("RequestTooLarge")
+	RequestMovedError       = Code("RequestMoved")
+	ResourceNotFoundError   = Code("ResourceNotFound")
+	UnknownErrorError       = Code("UnknownError")
+)
+
+// Error instances store an optional error cause.
+type Error interface {
+	error
+	Cause() error
+}
+
+type gojoyentError struct {
+	error
+	errcode Code
+	cause   error
+}
+
+// Type checks.
+var _ Error = (*gojoyentError)(nil)
+
+// code returns the error code.
+func (err *gojoyentError) code() Code {
+	if err.errcode != UnknownErrorError {
+		return err.errcode
+	}
+	if e, ok := err.cause.(*gojoyentError); ok {
+		return e.code()
+	}
+	return UnknownErrorError
+}
+
+// Cause returns the error cause.
+func (err *gojoyentError) Cause() error {
+	return err.cause
+}
+
+// causedBy returns true if this error or its cause is of the specified error code.
+func (err *gojoyentError) causedBy(code Code) bool {
+	if err.code() == code {
+		return true
+	}
+	if cause, ok := err.cause.(*gojoyentError); ok {
+		return cause.code() == code
+	}
+	return false
+}
+
+// Error fulfills the error interface, taking account of any caused-by error.
+func (err *gojoyentError) Error() string {
+	if err.cause != nil {
+		return fmt.Sprintf("%v\ncaused by: %v", err.error, err.cause)
+	}
+	return err.error.Error()
+}
+
+func IsBadRequest(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(BadRequestError)
+	}
+	return false
+}
+
+func IsInternalError(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(InternalErrorError)
+	}
+	return false
+}
+
+func IsInvalidArgument(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(InvalidArgumentError)
+	}
+	return false
+}
+
+func IsInvalidCredentials(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(InvalidCredentialsError)
+	}
+	return false
+}
+
+func IsInvalidHeader(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(InvalidHeaderError)
+	}
+	return false
+}
+
+func IsInvalidVersion(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(InvalidVersionError)
+	}
+	return false
+}
+
+func IsMissingParameter(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(MissingParameterError)
+	}
+	return false
+}
+
+func IsNotAuthorized(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(NotAuthorizedError)
+	}
+	return false
+}
+
+func IsRequestThrottled(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(RequestThrottledError)
+	}
+	return false
+}
+
+func IsRequestTooLarge(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(RequestTooLargeError)
+	}
+	return false
+}
+
+func IsRequestMoved(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(RequestMovedError)
+	}
+	return false
+}
+
+func IsResourceNotFound(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(ResourceNotFoundError)
+	}
+	return false
+}
+
+func IsUnknownError(err error) bool {
+	if e, ok := err.(*gojoyentError); ok {
+		return e.causedBy(UnknownErrorError)
+	}
+	return false
+}
+
+// makeErrorf creates a new Error instance with the specified code and cause.
+func makeErrorf(code Code, cause error, format string, args ...interface{}) Error {
+	return &gojoyentError{
+		errcode: code,
+		error:   fmt.Errorf(format, args...),
+		cause:   cause,
+	}
+}
+
+// Newf creates a new UnknownError Error instance with the specified cause.
+func Newf(cause error, format string, args ...interface{}) Error {
+	return makeErrorf(UnknownErrorError, cause, format, args...)
+}
+
+// NewBadRequestf creates a new BadRequest Error instance with the specified cause.
+func NewBadRequestf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Bad Request: %s", context)
+	}
+	return makeErrorf(BadRequestError, cause, format, args...)
+}
+
+// NewInternalErrorf creates a new InternalError Error instance with the specified cause.
+func NewInternalErrorf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Internal Error: %s", context)
+	}
+	return makeErrorf(InternalErrorError, cause, format, args...)
+}
+
+// NewInvalidArgumentf creates a new InvalidArgument Error instance with the specified cause.
+func NewInvalidArgumentf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Invalid Argument: %s", context)
+	}
+	return makeErrorf(InvalidArgumentError, cause, format, args...)
+}
+
+// NewInvalidCredentialsf creates a new InvalidCredentials Error instance with the specified cause.
+func NewInvalidCredentialsf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Invalid Credentials: %s", context)
+	}
+	return makeErrorf(InvalidCredentialsError, cause, format, args...)
+}
+
+// NewInvalidHeaderf creates a new InvalidHeader Error instance with the specified cause.
+func NewInvalidHeaderf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Invalid Header: %s", context)
+	}
+	return makeErrorf(InvalidHeaderError, cause, format, args...)
+}
+
+// NewInvalidVersionf creates a new InvalidVersion Error instance with the specified cause.
+func NewInvalidVersionf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Invalid Version: %s", context)
+	}
+	return makeErrorf(InvalidVersionError, cause, format, args...)
+}
+
+// NewMissingParameterf creates a new MissingParameter Error instance with the specified cause.
+func NewMissingParameterf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Missing Parameter: %s", context)
+	}
+	return makeErrorf(MissingParameterError, cause, format, args...)
+}
+
+// NewNotAuthorizedf creates a new NotAuthorized Error instance with the specified cause.
+func NewNotAuthorizedf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Not Authorized: %s", context)
+	}
+	return makeErrorf(NotAuthorizedError, cause, format, args...)
+}
+
+// NewRequestThrottledf creates a new RequestThrottled Error instance with the specified cause.
+func NewRequestThrottledf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Request Throttled: %s", context)
+	}
+	return makeErrorf(RequestThrottledError, cause, format, args...)
+}
+
+// NewRequestTooLargef creates a new RequestTooLarge Error instance with the specified cause.
+func NewRequestTooLargef(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Request Too Large: %s", context)
+	}
+	return makeErrorf(RequestTooLargeError, cause, format, args...)
+}
+
+// NewRequestMovedf creates a new RequestMoved Error instance with the specified cause.
+func NewRequestMovedf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Request Moved: %s", context)
+	}
+	return makeErrorf(RequestMovedError, cause, format, args...)
+}
+
+// NewResourceNotFoundf creates a new ResourceNotFound Error instance with the specified cause.
+func NewResourceNotFoundf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Resource Not Found: %s", context)
+	}
+	return makeErrorf(ResourceNotFoundError, cause, format, args...)
+}
+
+// NewUnknownErrorf creates a new UnknownError Error instance with the specified cause.
+func NewUnknownErrorf(cause error, context interface{}, format string, args ...interface{}) Error {
+	if format == "" {
+		format = fmt.Sprintf("Unknown Error: %s", context)
+	}
+	return makeErrorf(UnknownErrorError, cause, format, args...)
+}
diff --git a/vendor/github.com/joyent/gocommon/gocommon.go b/vendor/github.com/joyent/gocommon/gocommon.go
new file mode 100644
index 00000000000..f5f6d82941c
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/gocommon.go
@@ -0,0 +1,21 @@
+/*
+ * The gocommon package collects common packages to interact with the Joyent Public Cloud and Joyent Manta services.
+ *
+ * The gocommon package is structured as follows:
+ *
+ * - gocommon/client. Client for sending requests.
+ * - gocommon/errors. Joyent specific errors.
+ * - gocommon/http. HTTP client for sending requests.
+ * - gocommon/jpc. This package provides common structures and functions across packages.
+ * - gocommon/testing. Testing Suite for local testing.
+ *
+ * Copyright (c) 2016 Joyent Inc.
+ * Written by Daniele Stroppa
+ *
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+package gocommon
diff --git a/vendor/github.com/joyent/gocommon/http/client.go b/vendor/github.com/joyent/gocommon/http/client.go
new file mode 100644
index 00000000000..2fde6df3960
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/http/client.go
@@ -0,0 +1,427 @@
+//
+// gocommon - Go library to interact with the JoyentCloud
+// An HTTP client which sends JSON and binary requests, handling data marshalling and response processing.
+//
+// Copyright (c) 2013 Joyent Inc.
+// +// Written by Daniele Stroppa +// + +package http + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/joyent/gocommon" + "github.com/joyent/gocommon/errors" + "github.com/joyent/gocommon/jpc" + "github.com/joyent/gosign/auth" +) + +const ( + contentTypeJSON = "application/json" + contentTypeOctetStream = "application/octet-stream" +) + +type Client struct { + http.Client + maxSendAttempts int + credentials *auth.Credentials + apiVersion string + logger *log.Logger + trace bool +} + +type ErrorResponse struct { + Message string `json:"message"` + Code int `json:"code"` +} + +func (e *ErrorResponse) Error() string { + return fmt.Sprintf("Failed: %d: %s", e.Code, e.Message) +} + +type ErrorWrapper struct { + Error ErrorResponse `json:"error"` +} + +type RequestData struct { + ReqHeaders http.Header + Params *url.Values + ReqValue interface{} + ReqReader io.Reader + ReqLength int +} + +type ResponseData struct { + ExpectedStatus []int + RespHeaders *http.Header + RespValue interface{} + RespReader io.ReadCloser +} + +const ( + // The maximum number of times to try sending a request before we give up + // (assuming any unsuccessful attempts can be sensibly tried again). + MaxSendAttempts = 3 +) + +// New returns a new http *Client using the default net/http client. +func New(credentials *auth.Credentials, apiVersion string, logger *log.Logger) *Client { + return &Client{*http.DefaultClient, MaxSendAttempts, credentials, apiVersion, logger, false} +} + +// SetTrace allows control over whether requests will write their +// contents to the logger supplied during construction. Note that this +// is not safe to call from multiple go-routines. +func (client *Client) SetTrace(traceEnabled bool) { + client.trace = traceEnabled +} + +func gojoyentAgent() string { + return fmt.Sprintf("gocommon (%s)", gocommon.Version) +} + +func createHeaders(extraHeaders http.Header, credentials *auth.Credentials, contentType, rfc1123Date, + apiVersion string, isMantaRequest bool) (http.Header, error) { + + headers := make(http.Header) + if extraHeaders != nil { + for header, values := range extraHeaders { + for _, value := range values { + headers.Add(header, value) + } + } + } + if extraHeaders.Get("Content-Type") == "" { + headers.Add("Content-Type", contentType) + } + if extraHeaders.Get("Accept") == "" { + headers.Add("Accept", contentType) + } + if rfc1123Date != "" { + headers.Set("Date", rfc1123Date) + } else { + headers.Set("Date", getDateForRegion(credentials, isMantaRequest)) + } + authHeaders, err := auth.CreateAuthorizationHeader(headers, credentials, isMantaRequest) + if err != nil { + return http.Header{}, err + } + headers.Set("Authorization", authHeaders) + if apiVersion != "" { + headers.Set("X-Api-Version", apiVersion) + } + headers.Add("User-Agent", gojoyentAgent()) + return headers, nil +} + +func getDateForRegion(credentials *auth.Credentials, isManta bool) string { + if isManta { + location, _ := time.LoadLocation(jpc.Locations["us-east-1"]) + return time.Now().In(location).Format(time.RFC1123) + } else { + location, _ := time.LoadLocation(jpc.Locations[credentials.Region()]) + return time.Now().In(location).Format(time.RFC1123) + } +} + +// JsonRequest JSON encodes and sends the object in reqData.ReqValue (if any) to the specified URL. +// Optional method arguments are passed using the RequestData object. 
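+//
+// A call sketch (hypothetical result type, error handling elided; a sketch
+// only, not part of the original source):
+//
+//	var result MachineInfo
+//	err := c.JsonRequest("GET", someURL, "", &RequestData{}, &ResponseData{RespValue: &result})
+//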
+// Relevant RequestData and ResponseData fields:
+// ReqHeaders: additional HTTP header values to add to the request.
+// ExpectedStatus: the allowed HTTP response status values, else an error is returned.
+// ReqValue: the data object to send.
+// RespValue: the data object to decode the result into.
+func (c *Client) JsonRequest(method, url, rfc1123Date string, request *RequestData, response *ResponseData) (err error) {
+	err = nil
+	var body []byte
+	if request.Params != nil {
+		url += "?" + request.Params.Encode()
+	}
+	if request.ReqValue != nil {
+		body, err = json.Marshal(request.ReqValue)
+		if err != nil {
+			err = errors.Newf(err, "failed marshalling the request body")
+			return
+		}
+	}
+	headers, err := createHeaders(request.ReqHeaders, c.credentials, contentTypeJSON, rfc1123Date, c.apiVersion,
+		isMantaRequest(url, c.credentials.UserAuthentication.User))
+	if err != nil {
+		return err
+	}
+	respBody, respHeader, err := c.sendRequest(
+		method, url, bytes.NewReader(body), len(body), headers, response.ExpectedStatus, c.logger)
+	if err != nil {
+		return
+	}
+	defer respBody.Close()
+	respData, err := ioutil.ReadAll(respBody)
+	if err != nil {
+		err = errors.Newf(err, "failed reading the response body")
+		return
+	}
+
+	if len(respData) > 0 {
+		if response.RespValue != nil {
+			if dest, ok := response.RespValue.(*[]byte); ok {
+				*dest = respData
+				//err = decodeJSON(bytes.NewReader(respData), false, response.RespValue)
+				//if err != nil {
+				//	err = errors.Newf(err, "failed unmarshaling/decoding the response body: %s", respData)
+				//}
+			} else {
+				err = json.Unmarshal(respData, response.RespValue)
+				if err != nil {
+					err = decodeJSON(bytes.NewReader(respData), true, response.RespValue)
+					if err != nil {
+						err = errors.Newf(err, "failed unmarshaling/decoding the response body: %s", respData)
+					}
+				}
+			}
+		}
+	}
+
+	if respHeader != nil {
+		response.RespHeaders = respHeader
+	}
+
+	return
+}
+
+func decodeJSON(r io.Reader, multiple bool, into interface{}) error {
+	d := json.NewDecoder(r)
+	if multiple {
+		return decodeStream(d, into)
+	}
+	return d.Decode(into)
+}
+
+func decodeStream(d *json.Decoder, into interface{}) error {
+	t := reflect.TypeOf(into)
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice {
+		return fmt.Errorf("unexpected type %s", t)
+	}
+	elemType := t.Elem().Elem()
+	slice := reflect.ValueOf(into).Elem()
+	for {
+		val := reflect.New(elemType)
+		if err := d.Decode(val.Interface()); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		slice.Set(reflect.Append(slice, val.Elem()))
+	}
+	return nil
+}
+
+// BinaryRequest sends the bytes from reqData.ReqReader (if any) to the specified URL.
+// Optional method arguments are passed using the RequestData object.
+// Relevant RequestData and ResponseData fields:
+// ReqHeaders: additional HTTP header values to add to the request.
+// ExpectedStatus: the allowed HTTP response status values, else an error is returned.
+// ReqReader: an io.Reader providing the bytes to send.
+// RespReader: assigned an io.ReadCloser instance used to read the returned data.
+func (c *Client) BinaryRequest(method, url, rfc1123Date string, request *RequestData, response *ResponseData) (err error) {
+	err = nil
+
+	if request.Params != nil {
+		url += "?"
+ request.Params.Encode() + } + headers, err := createHeaders(request.ReqHeaders, c.credentials, contentTypeOctetStream, rfc1123Date, + c.apiVersion, isMantaRequest(url, c.credentials.UserAuthentication.User)) + if err != nil { + return err + } + respBody, respHeader, err := c.sendRequest( + method, url, request.ReqReader, request.ReqLength, headers, response.ExpectedStatus, c.logger) + if err != nil { + return + } + if response.RespReader != nil { + response.RespReader = respBody + } + if respHeader != nil { + response.RespHeaders = respHeader + } + return +} + +// Sends the specified request to URL and checks that the HTTP response status is as expected. +// reqReader: a reader returning the data to send. +// length: the number of bytes to send. +// headers: HTTP headers to include with the request. +// expectedStatus: a slice of allowed response status codes. +func (c *Client) sendRequest(method, URL string, reqReader io.Reader, length int, headers http.Header, + expectedStatus []int, logger *log.Logger) (rc io.ReadCloser, respHeader *http.Header, err error) { + reqData := make([]byte, length) + if reqReader != nil { + nrRead, err := io.ReadFull(reqReader, reqData) + if err != nil { + err = errors.Newf(err, "failed reading the request data, read %v of %v bytes", nrRead, length) + return rc, respHeader, err + } + } + rawResp, err := c.sendRateLimitedRequest(method, URL, headers, reqData, logger) + if err != nil { + return + } + + if logger != nil && c.trace { + logger.Printf("Request: %s %s\n", method, URL) + logger.Printf("Request header: %s\n", headers) + logger.Printf("Request body: %s\n", reqData) + logger.Printf("Response: %s\n", rawResp.Status) + logger.Printf("Response header: %s\n", rawResp.Header) + logger.Printf("Response body: %s\n", rawResp.Body) + logger.Printf("Response error: %s\n", err) + } + + foundStatus := false + if len(expectedStatus) == 0 { + expectedStatus = []int{http.StatusOK} + } + for _, status := range expectedStatus { + if rawResp.StatusCode == status { + foundStatus = true + break + } + } + if !foundStatus && len(expectedStatus) > 0 { + err = handleError(URL, rawResp) + rawResp.Body.Close() + return + } + return rawResp.Body, &rawResp.Header, err +} + +func (c *Client) sendRateLimitedRequest(method, URL string, headers http.Header, reqData []byte, + logger *log.Logger) (resp *http.Response, err error) { + for i := 0; i < c.maxSendAttempts; i++ { + var reqReader io.Reader + if reqData != nil { + reqReader = bytes.NewReader(reqData) + } + req, err := http.NewRequest(method, URL, reqReader) + if err != nil { + err = errors.Newf(err, "failed creating the request %s", URL) + return nil, err + } + // Setting req.Close to true to avoid malformed HTTP version "nullHTTP/1.1" error + // See http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi + req.Close = true + for header, values := range headers { + for _, value := range values { + req.Header.Add(header, value) + } + } + req.ContentLength = int64(len(reqData)) + resp, err = c.Do(req) + if err != nil { + return nil, errors.Newf(err, "failed executing the request %s", URL) + } + if resp.StatusCode != http.StatusRequestEntityTooLarge || resp.Header.Get("Retry-After") == "" { + return resp, nil + } + resp.Body.Close() + retryAfter, err := strconv.ParseFloat(resp.Header.Get("Retry-After"), 64) + if err != nil { + return nil, errors.Newf(err, "Invalid Retry-After header %s", URL) + } + if retryAfter == 0 { + return nil, errors.Newf(err, "Resource 
limit exceeded at URL %s", URL)
+		}
+		if logger != nil {
+			logger.Printf("Too many requests, retrying in %dms.\n", int(retryAfter*1000))
+		}
+		time.Sleep(time.Duration(retryAfter) * time.Second)
+	}
+	return nil, errors.Newf(err, "Maximum number of attempts (%d) reached sending request to %s", c.maxSendAttempts, URL)
+}
+
+type HttpError struct {
+	StatusCode      int
+	Data            map[string][]string
+	Url             string
+	ResponseMessage string
+}
+
+func (e *HttpError) Error() string {
+	return fmt.Sprintf("request %q returned unexpected status %d with body %q",
+		e.Url,
+		e.StatusCode,
+		e.ResponseMessage,
+	)
+}
+
+// The HTTP response status code was not one of those expected, so we construct an error.
+// NotFound (404) codes have their own NotFound error type.
+// We also make a guess at duplicate value errors.
+func handleError(URL string, resp *http.Response) error {
+	errBytes, _ := ioutil.ReadAll(resp.Body)
+	errInfo := string(errBytes)
+	// Check if we have a JSON representation of the failure, if so decode it.
+	if resp.Header.Get("Content-Type") == contentTypeJSON {
+		var errResponse ErrorResponse
+		if err := json.Unmarshal(errBytes, &errResponse); err == nil {
+			errInfo = errResponse.Message
+		}
+	}
+	httpError := &HttpError{
+		resp.StatusCode, map[string][]string(resp.Header), URL, errInfo,
+	}
+	switch resp.StatusCode {
+	case http.StatusBadRequest:
+		return errors.NewBadRequestf(httpError, "", "Bad request %s", URL)
+	case http.StatusUnauthorized:
+		return errors.NewNotAuthorizedf(httpError, "", "Unauthorised URL %s", URL)
+		//return errors.NewInvalidCredentialsf(httpError, "", "Unauthorised URL %s", URL)
+	case http.StatusForbidden:
+		//return errors.
+	case http.StatusNotFound:
+		return errors.NewResourceNotFoundf(httpError, "", "Resource not found %s", URL)
+	case http.StatusMethodNotAllowed:
+		//return errors.
+	case http.StatusNotAcceptable:
+		return errors.NewInvalidHeaderf(httpError, "", "Invalid Header %s", URL)
+	case http.StatusConflict:
+		return errors.NewMissingParameterf(httpError, "", "Missing parameters %s", URL)
+		//return errors.NewInvalidArgumentf(httpError, "", "Invalid parameter %s", URL)
+	case http.StatusRequestEntityTooLarge:
+		return errors.NewRequestTooLargef(httpError, "", "Request too large %s", URL)
+	case http.StatusUnsupportedMediaType:
+		//return errors.
+	case http.StatusServiceUnavailable:
+		return errors.NewInternalErrorf(httpError, "", "Internal error %s", URL)
+	case 420:
+		// SlowDown
+		return errors.NewRequestThrottledf(httpError, "", "Request throttled %s", URL)
+	case 422:
+		// Unprocessable Entity
+		return errors.NewInvalidArgumentf(httpError, "", "Invalid parameters %s", URL)
+	case 449:
+		// RetryWith
+		return errors.NewInvalidVersionf(httpError, "", "Invalid version %s", URL)
+		//RequestMovedError -> ?
+	}
+
+	return errors.NewUnknownErrorf(httpError, "", "Unknown error %s", URL)
+}
+
+func isMantaRequest(url, user string) bool {
+	return strings.Contains(url, "/"+user+"/stor") || strings.Contains(url, "/"+user+"/jobs") || strings.Contains(url, "/"+user+"/public")
+}
diff --git a/vendor/github.com/joyent/gocommon/jpc/jpc.go b/vendor/github.com/joyent/gocommon/jpc/jpc.go
new file mode 100644
index 00000000000..0943f6a2800
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/jpc/jpc.go
@@ -0,0 +1,113 @@
+/*
+ *
+ * gocommon - Go library to interact with the JoyentCloud
+ *
+ *
+ * Copyright (c) 2016 Joyent Inc.
+ *
+ * Written by Daniele Stroppa
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+package jpc
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"reflect"
+	"runtime"
+
+	"github.com/joyent/gosign/auth"
+)
+
+const (
+	// Environment variables
+	TritonAccount = "TRITON_ACCOUNT"
+	TritonKeyId   = "TRITON_KEY_ID"
+	TritonUrl     = "TRITON_URL"
+	SdcAccount    = "SDC_ACCOUNT"
+	SdcKeyId      = "SDC_KEY_ID"
+	SdcUrl        = "SDC_URL"
+	MantaUser     = "MANTA_USER"
+	MantaKeyId    = "MANTA_KEY_ID"
+	MantaUrl      = "MANTA_URL"
+)
+
+var Locations = map[string]string{
+	"us-east-1": "America/New_York",
+	"us-west-1": "America/Los_Angeles",
+	"us-sw-1":   "America/Los_Angeles",
+	"eu-ams-1":  "Europe/Amsterdam",
+}
+
+// getConfig returns the value of the first of the given environment
+// variables that has a non-empty value.
+func getConfig(envVars ...string) (value string) {
+	value = ""
+	for _, v := range envVars {
+		value = os.Getenv(v)
+		if value != "" {
+			break
+		}
+	}
+	return
+}
+
+// getUserHome returns the value of the HOME environment variable
+// (APPDATA on Windows) for the user environment.
+func getUserHome() string {
+	if runtime.GOOS == "windows" {
+		return os.Getenv("APPDATA")
+	} else {
+		return os.Getenv("HOME")
+	}
+}
+
+// credentialsFromEnv creates and initializes the credentials from the
+// environment variables.
+func credentialsFromEnv(key string) (*auth.Credentials, error) {
+	var keyName string
+	if key == "" {
+		keyName = getUserHome() + "/.ssh/id_rsa"
+	} else {
+		keyName = key
+	}
+	privateKey, err := ioutil.ReadFile(keyName)
+	if err != nil {
+		return nil, err
+	}
+	authentication, err := auth.NewAuth(getConfig(TritonAccount, SdcAccount, MantaUser), string(privateKey), "rsa-sha256")
+	if err != nil {
+		return nil, err
+	}
+
+	return &auth.Credentials{
+		UserAuthentication: authentication,
+		SdcKeyId:           getConfig(TritonKeyId, SdcKeyId),
+		SdcEndpoint:        auth.Endpoint{URL: getConfig(TritonUrl, SdcUrl)},
+		MantaKeyId:         getConfig(MantaKeyId),
+		MantaEndpoint:      auth.Endpoint{URL: getConfig(MantaUrl)},
+	}, nil
+}
+
+// CompleteCredentialsFromEnv gets the credentials from the environment and
+// verifies that all the required authentication parameters have values.
+func CompleteCredentialsFromEnv(keyName string) (cred *auth.Credentials, err error) {
+	cred, err = credentialsFromEnv(keyName)
+	if err != nil {
+		return nil, err
+	}
+	v := reflect.ValueOf(cred).Elem()
+	t := v.Type()
+	for i := 0; i < v.NumField(); i++ {
+		f := v.Field(i)
+		if f.String() == "" {
+			return nil, fmt.Errorf("Required environment variable not set for credentials attribute: %s", t.Field(i).Name)
+		}
+	}
+	return cred, nil
+}
diff --git a/vendor/github.com/joyent/gocommon/version.go b/vendor/github.com/joyent/gocommon/version.go
new file mode 100644
index 00000000000..82fe84494bd
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/version.go
@@ -0,0 +1,37 @@
+/*
+ *
+ * gocommon - Go library to interact with the JoyentCloud
+ *
+ *
+ * Copyright (c) 2016 Joyent Inc.
+ *
+ * Written by Daniele Stroppa
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+package gocommon
+
+import (
+	"fmt"
+)
+
+type VersionNum struct {
+	Major int
+	Minor int
+	Micro int
+}
+
+func (v *VersionNum) String() string {
+	return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Micro)
+}
+
+var VersionNumber = VersionNum{
+	Major: 0,
+	Minor: 1,
+	Micro: 0,
+}
+
+var Version = VersionNumber.String()
diff --git a/vendor/github.com/joyent/gocommon/wercker.yml b/vendor/github.com/joyent/gocommon/wercker.yml
new file mode 100644
index 00000000000..563acf6fe49
--- /dev/null
+++ b/vendor/github.com/joyent/gocommon/wercker.yml
@@ -0,0 +1,40 @@
+box: golang
+
+build:
+  steps:
+    # Sets the go workspace and places your package
+    # at the right place in the workspace tree
+    - setup-go-workspace:
+        package-dir: github.com/joyent/gocommon
+
+    # Gets the dependencies
+    - script:
+        name: go get
+        code: |
+          go get -v -t ./...
+
+    # Build the project
+    - script:
+        name: go build
+        code: |
+          go build ./...
+
+    - script:
+        name: make a new key for testing
+        code: |
+          ssh-keygen -b 2048 \
+            -C "Testing Key" \
+            -f /root/.ssh/id_rsa \
+            -t rsa \
+            -P ""
+
+    # Test the project
+    - script:
+        name: go test
+        code: |
+          export KEY_ID=$(ssh-keygen -lf /root/.ssh/id_rsa | awk -F' ' '{print $2}' | cut -d':' -f2-)
+          export SDC_KEY_ID=${KEY_ID}
+          export MANTA_KEY_ID=${KEY_ID}
+          export SDC_URL=https://us-east-1.api.joyent.com
+          export MANTA_URL=https://us-east.manta.joyent.com
+          go test ./...
diff --git a/vendor/github.com/joyent/gomanta/LICENSE b/vendor/github.com/joyent/gomanta/LICENSE
new file mode 100644
index 00000000000..14e2f777f6c
--- /dev/null
+++ b/vendor/github.com/joyent/gomanta/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11.
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/joyent/gomanta/manta/manta.go b/vendor/github.com/joyent/gomanta/manta/manta.go
new file mode 100644
index 00000000000..f7307454f4f
--- /dev/null
+++ b/vendor/github.com/joyent/gomanta/manta/manta.go
@@ -0,0 +1,459 @@
+/*
+The gomanta/manta package interacts with the Manta API (http://apidocs.joyent.com/manta/api.html).
+
+This Source Code Form is subject to the terms of the Mozilla Public
+License, v. 2.0. If a copy of the MPL was not distributed with this
+file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+Copyright (c) 2016 Joyent Inc.
+Written by Daniele Stroppa
+
+*/
+package manta
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"path"
+	"time"
+
+	"github.com/joyent/gocommon/client"
+	"github.com/joyent/gocommon/errors"
+
+	jh "github.com/joyent/gocommon/http"
+)
+
+const (
+	// The default version of the Manta API to use
+	DefaultAPIVersion = "7.1"
+
+	// Manta API URL parts
+	apiStorage    = "stor"
+	apiJobs       = "jobs"
+	apiJobsLive   = "live"
+	apiJobsIn     = "in"
+	apiJobsOut    = "out"
+	apiJobsFail   = "fail"
+	apiJobsErr    = "err"
+	apiJobsEnd    = "end"
+	apiJobsCancel = "cancel"
+	apiJobsStatus = "status"
+)
+
+// Client provides a means to access Joyent Manta
+type Client struct {
+	client client.Client
+}
+
+// New creates a new Client.
+func New(client client.Client) *Client {
+	return &Client{client}
+}
+
+// request represents an API request
+type request struct {
+	method         string
+	url            string
+	reqValue       interface{}
+	reqHeader      http.Header
+	reqReader      io.Reader
+	reqLength      int
+	resp           interface{}
+	respHeader     *http.Header
+	expectedStatus int
+}
+
+// Helper method to send an API request
+func (c *Client) sendRequest(req request) (*jh.ResponseData, error) {
+	request := jh.RequestData{
+		ReqValue:   req.reqValue,
+		ReqHeaders: req.reqHeader,
+		ReqReader:  req.reqReader,
+		ReqLength:  req.reqLength,
+	}
+	if req.expectedStatus == 0 {
+		req.expectedStatus = http.StatusOK
+	}
+	respData := jh.ResponseData{
+		RespValue:      req.resp,
+		RespHeaders:    req.respHeader,
+		ExpectedStatus: []int{req.expectedStatus},
+	}
+	err := c.client.SendRequest(req.method, req.url, "", &request, &respData)
+	return &respData, err
+}
+
+// Helper method to create the API URL
+func makeURL(parts ...string) string {
+	return path.Join(parts...)
+}
+
+// ListDirectoryOpts represents the options that can be specified
+// when listing a directory.
+type ListDirectoryOpts struct { + Limit int `json:"limit"` // Limit to the number of records returned (default and max is 1000) + Marker string `json:"marker"` // Key name at which to start the next listing +} + +// Entry represents an object stored in Manta, either a file or a directory +type Entry struct { + Name string `json:"name"` // Entry name + Etag string `json:"etag,omitempty"` // If type is 'object', object UUID + Size int `json:"size,omitempty"` // If type is 'object', object size (content-length) + Type string `json:"type"` // Entry type, one of 'directory' or 'object' + Mtime string `json:"mtime"` // ISO8601 timestamp of the last update +} + +// Creates a directory at the specified path. Any parent directory must exist. +// See API docs: http://apidocs.joyent.com/manta/api.html#PutDirectory +func (c *Client) PutDirectory(path string) error { + requestHeaders := make(http.Header) + requestHeaders.Set("Content-Type", "application/json; type=directory") + requestHeaders.Set("Accept", "*/*") + req := request{ + method: client.PUT, + url: makeURL(apiStorage, path), + reqHeader: requestHeaders, + expectedStatus: http.StatusNoContent, + } + if _, err := c.sendRequest(req); err != nil { + return errors.Newf(err, "failed to create directory: %s", path) + } + return nil +} + +// Returns the content of the specified directory, using the specified options. +// See API docs: http://apidocs.joyent.com/manta/api.html#ListDirectory +func (c *Client) ListDirectory(directory string, opts ListDirectoryOpts) ([]Entry, error) { + var resp []Entry + requestHeaders := make(http.Header) + requestHeaders.Set("Accept", "*/*") + req := request{ + method: client.GET, + url: makeURL(apiStorage, directory), + reqHeader: requestHeaders, + resp: &resp, + reqValue: opts, + } + if _, err := c.sendRequest(req); err != nil { + return nil, errors.Newf(err, "failed to list directory %s", directory) + } + return resp, nil +} + +// Deletes the specified directory. Directory must be empty. +// See API docs: http://apidocs.joyent.com/manta/api.html#DeleteDirectory +func (c *Client) DeleteDirectory(path string) error { + req := request{ + method: client.DELETE, + url: makeURL(apiStorage, path), + expectedStatus: http.StatusNoContent, + } + if _, err := c.sendRequest(req); err != nil { + return errors.Newf(err, "failed to delete directory %s", path) + } + return nil +} + +// Creates an object at the specified path. Any parent directory must exist. +// See API docs: http://apidocs.joyent.com/manta/api.html#PutObject +func (c *Client) PutObject(path, objectName string, object []byte) error { + r := bytes.NewReader(object) + req := request{ + method: client.PUT, + url: makeURL(apiStorage, path, objectName), + reqReader: r, + reqLength: len(object), + expectedStatus: http.StatusNoContent, + } + if _, err := c.sendRequest(req); err != nil { + return errors.Newf(err, "failed to create object: %s/%s", path, objectName) + } + return nil +} + +// Retrieves the specified object from the specified location. 
+// See API docs: http://apidocs.joyent.com/manta/api.html#GetObject
+func (c *Client) GetObject(path, objectName string) ([]byte, error) {
+	var resp []byte
+	requestHeaders := make(http.Header)
+	requestHeaders.Set("Accept", "*/*")
+	req := request{
+		method:    client.GET,
+		url:       makeURL(apiStorage, path, objectName),
+		reqHeader: requestHeaders,
+		resp:      &resp,
+	}
+	respData, err := c.sendRequest(req)
+	if err != nil {
+		return nil, errors.Newf(err, "failed to get object %s/%s", path, objectName)
+	}
+	res, ok := respData.RespValue.(*[]byte)
+	if !ok {
+		return nil, errors.Newf(err, "failed to assert downloaded data as type *[]byte for object %s/%s", path, objectName)
+	}
+	return *res, nil
+}
+
+// Deletes the specified object from the specified location.
+// See API docs: http://apidocs.joyent.com/manta/api.html#DeleteObject
+func (c *Client) DeleteObject(path, objectName string) error {
+	req := request{
+		method:         client.DELETE,
+		url:            makeURL(apiStorage, path, objectName),
+		expectedStatus: http.StatusNoContent,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return errors.Newf(err, "failed to delete object %s/%s", path, objectName)
+	}
+	return nil
+}
+
+// Creates a link (similar to a Unix hard link) from location to path/linkName.
+// See API docs: http://apidocs.joyent.com/manta/api.html#PutSnapLink
+func (c *Client) PutSnapLink(path, linkName, location string) error {
+	requestHeaders := make(http.Header)
+	requestHeaders.Set("Accept", "application/json; type=link")
+	requestHeaders.Set("Location", location)
+	req := request{
+		method:         client.PUT,
+		url:            makeURL(apiStorage, path, linkName),
+		reqHeader:      requestHeaders,
+		expectedStatus: http.StatusNoContent,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return errors.Newf(err, "failed to create snap link: %s/%s", path, linkName)
+	}
+	return nil
+}
+
+// CreateJobOpts represents the options that can be specified
+// when creating a job.
+type CreateJobOpts struct {
+	Name   string  `json:"name,omitempty"` // Job Name (optional)
+	Phases []Phase `json:"phases"`         // Tasks to execute as part of this job
+}
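
A short sketch tying together the storage calls above (PutDirectory, PutObject, GetObject, ListDirectory). Obtaining a configured *manta.Client via gocommon's client package is out of scope here; the directory and object names are placeholders:

package mantaexample

import (
	"fmt"
	"log"

	"github.com/joyent/gomanta/manta" // assumed import path per the vendor layout
)

func storeAndFetch(mc *manta.Client) {
	// Parent directories are not created implicitly, so create top-down.
	if err := mc.PutDirectory("backups"); err != nil {
		log.Fatal(err)
	}
	if err := mc.PutObject("backups", "hello.txt", []byte("hello, manta")); err != nil {
		log.Fatal(err)
	}
	data, err := mc.GetObject("backups", "hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))

	// Limit defaults to (and is capped at) 1000 entries per listing.
	entries, err := mc.ListDirectory("backups", manta.ListDirectoryOpts{Limit: 100})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Type, e.Name)
	}
}

+
+// Job represents the status of a job.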
+type Job struct {
+	Id                 string      // Job unique identifier
+	Name               string      `json:"name,omitempty"` // Job Name
+	State              string      // Job state
+	Cancelled          bool        // Whether the job has been cancelled or not
+	InputDone          bool        // Whether input for the job has been closed or not
+	Stats              JobStats    `json:"stats,omitempty"` // Job statistics
+	TimeCreated        string      // Time the job was created
+	TimeDone           string      `json:"timeDone,omitempty"` // Time the job was completed
+	TimeArchiveStarted string      `json:"timeArchiveStarted,omitempty"` // Time the job archiving started
+	TimeArchiveDone    string      `json:"timeArchiveDone,omitempty"` // Time the job archiving completed
+	Phases             []Phase     `json:"phases"` // Job tasks
+	Options            interface{} // Job options
+}
+
+// JobStats represents statistics about a job
+type JobStats struct {
+	Errors    int // Number of errors
+	Outputs   int // Number of outputs produced
+	Retries   int // Number of retries
+	Tasks     int // Total number of tasks in the job
+	TasksDone int // Number of tasks done
+}
+
+// Phase represents a task to be executed as part of a Job
+type Phase struct {
+	Type   string   `json:"type,omitempty"`   // Task type, one of 'map' or 'reduce' (optional)
+	Assets []string `json:"assets,omitempty"` // An array of objects to be placed in the compute zones (optional)
+	Exec   string   `json:"exec"`             // The actual shell statement to execute
+	Init   string   `json:"init"`             // Shell statement to execute in each compute zone before any tasks are executed
+	Count  int      `json:"count,omitempty"`  // If type is 'reduce', an optional number of reducers for this phase (default is 1)
+	Memory int      `json:"memory,omitempty"` // Amount of DRAM to give to your compute zone (in MB, optional)
+	Disk   int      `json:"disk,omitempty"`   // Amount of disk space to give to your compute zone (in GB, optional)
+}
+
+// JobError represents an error that occurred during a job's execution
+type JobError struct {
+	Id      string // Job Id
+	Phase   string // Phase number of the failure
+	What    string // A human readable summary of what failed
+	Code    string // Error code
+	Message string // Human readable error message
+	Stderr  string // A key that saved the stderr for the given command (optional)
+	Key     string // The input key being processed when the task failed (optional)
+}
+
+// Creates a job with the given options.
+// See API docs: http://apidocs.joyent.com/manta/api.html#CreateJob
+func (c *Client) CreateJob(opts CreateJobOpts) (string, error) {
+	var resp string
+	var respHeader http.Header
+	req := request{
+		method:         client.POST,
+		url:            apiJobs,
+		reqValue:       opts,
+		respHeader:     &respHeader,
+		resp:           &resp,
+		expectedStatus: http.StatusCreated,
+	}
+	respData, err := c.sendRequest(req)
+	if err != nil {
+		return "", errors.Newf(err, "failed to create job with name: %s", opts.Name)
+	}
+	return respData.RespHeaders.Get("Location"), nil
+}
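
A hedged sketch of creating a compute job with the types above; the phase commands and job name are illustrative only, and the returned value is the Location header (e.g. "/<account>/jobs/<id>"):

package mantaexample

import (
	"fmt"
	"log"

	"github.com/joyent/gomanta/manta" // assumed import path per the vendor layout
)

func createLineCountJob(mc *manta.Client) string {
	opts := manta.CreateJobOpts{
		Name: "line-count",
		Phases: []manta.Phase{
			{Type: "map", Exec: "wc -l"},
			{Type: "reduce", Exec: "awk '{s+=$1} END {print s}'", Count: 1},
		},
	}
	location, err := mc.CreateJob(opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("job created at", location)
	return location
}

+
+// Submits inputs to an already created job.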
+// See API docs: http://apidocs.joyent.com/manta/api.html#AddJobInputs
+func (c *Client) AddJobInputs(jobId string, jobInputs io.Reader) error {
+	inputData, errI := ioutil.ReadAll(jobInputs)
+	if errI != nil {
+		return errors.Newf(errI, "failed to read inputs for job %s", jobId)
+	}
+	requestHeaders := make(http.Header)
+	requestHeaders.Set("Accept", "*/*")
+	requestHeaders.Set("Content-Type", "text/plain")
+	req := request{
+		method:         client.POST,
+		url:            makeURL(apiJobs, jobId, apiJobsLive, apiJobsIn),
+		reqValue:       string(inputData),
+		reqHeader:      requestHeaders,
+		expectedStatus: http.StatusNoContent,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return errors.Newf(err, "failed to add inputs to job %s", jobId)
+	}
+	return nil
+}
+
+// Closes input for a job and finalizes the job.
+// See API docs: http://apidocs.joyent.com/manta/api.html#EndJobInput
+func (c *Client) EndJobInputs(jobId string) error {
+	req := request{
+		method:         client.POST,
+		url:            makeURL(apiJobs, jobId, apiJobsLive, apiJobsIn, apiJobsEnd),
+		expectedStatus: http.StatusAccepted,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return errors.Newf(err, "failed to end inputs for job %s", jobId)
+	}
+	return nil
+}
+
+// Cancels a job, preventing it from doing any further work.
+// Cancellation is asynchronous and "best effort"; there is no guarantee the job will actually stop.
+// See API docs: http://apidocs.joyent.com/manta/api.html#CancelJob
+func (c *Client) CancelJob(jobId string) error {
+	req := request{
+		method:         client.POST,
+		url:            makeURL(apiJobs, jobId, apiJobsLive, apiJobsCancel),
+		expectedStatus: http.StatusAccepted,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return errors.Newf(err, "failed to cancel job %s", jobId)
+	}
+	return nil
+}
+
+// Returns the list of jobs.
+// Note you can filter the set of jobs down to only live jobs by setting the liveOnly flag.
+// See API docs: http://apidocs.joyent.com/manta/api.html#ListJobs
+func (c *Client) ListJobs(liveOnly bool) ([]Entry, error) {
+	var resp []Entry
+	var url string
+	if liveOnly {
+		url = fmt.Sprintf("%s?state=running", apiJobs)
+	} else {
+		url = apiJobs
+	}
+	req := request{
+		method: client.GET,
+		url:    url,
+		resp:   &resp,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return nil, errors.Newf(err, "failed to list jobs")
+	}
+	return resp, nil
+}
+
+// Gets the high-level job container object for a given job.
+// See API docs: http://apidocs.joyent.com/manta/api.html#GetJob
+func (c *Client) GetJob(jobId string) (Job, error) {
+	var resp Job
+	req := request{
+		method: client.GET,
+		url:    makeURL(apiJobs, jobId, apiJobsLive, apiJobsStatus),
+		resp:   &resp,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return Job{}, errors.Newf(err, "failed to get job with id: %s", jobId)
+	}
+	return resp, nil
+}
+
+// Returns the current "live" set of outputs from a given job.
+// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobOutput
+func (c *Client) GetJobOutput(jobId string) (string, error) {
+	var resp string
+	req := request{
+		method: client.GET,
+		url:    makeURL(apiJobs, jobId, apiJobsLive, apiJobsOut),
+		resp:   &resp,
+	}
+	if _, err := c.sendRequest(req); err != nil {
+		return "", errors.Newf(err, "failed to get output for job with id: %s", jobId)
+	}
+	return resp, nil
+}
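
A sketch of the full job lifecycle using the calls above: submit input keys, close input, poll GetJob until the job reports done, then read the output. The two-second polling interval is an arbitrary choice for this sketch:

package mantaexample

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/joyent/gomanta/manta" // assumed import path per the vendor layout
)

func runJob(mc *manta.Client, jobId string, inputKeys []string) {
	// Inputs are newline-separated Manta object keys.
	input := strings.NewReader(strings.Join(inputKeys, "\n"))
	if err := mc.AddJobInputs(jobId, input); err != nil {
		log.Fatal(err)
	}
	if err := mc.EndJobInputs(jobId); err != nil {
		log.Fatal(err)
	}
	for {
		job, err := mc.GetJob(jobId)
		if err != nil {
			log.Fatal(err)
		}
		if job.State == "done" { // state strings are service-defined
			break
		}
		time.Sleep(2 * time.Second)
	}
	out, err := mc.GetJobOutput(jobId)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}

+
+// Returns the submitted input objects for a given job, available while the job is running.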
+// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobInput +func (c *Client) GetJobInput(jobId string) (string, error) { + var resp string + req := request{ + method: client.GET, + url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsIn), + resp: &resp, + } + if _, err := c.sendRequest(req); err != nil { + return "", errors.Newf(err, "failed to get input for job with id: %s", jobId) + } + return resp, nil +} + +// Returns the current "live" set of failures from a given job. +// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobFailures +func (c *Client) GetJobFailures(jobId string) (interface{}, error) { + var resp interface{} + req := request{ + method: client.GET, + url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsFail), + resp: &resp, + } + if _, err := c.sendRequest(req); err != nil { + return nil, errors.Newf(err, "failed to get failures for job with id: %s", jobId) + } + return resp, nil +} + +// Returns the current "live" set of errors from a given job. +// See API docs: http://apidocs.joyent.com/manta/api.html#GetJobErrors +func (c *Client) GetJobErrors(jobId string) ([]JobError, error) { + var resp []JobError + req := request{ + method: client.GET, + url: makeURL(apiJobs, jobId, apiJobsLive, apiJobsErr), + resp: &resp, + } + if _, err := c.sendRequest(req); err != nil { + return nil, errors.Newf(err, "failed to get errors for job with id: %s", jobId) + } + return resp, nil +} + +// Returns a signed URL to retrieve the object at path. +func (c *Client) SignURL(path string, expires time.Time) (string, error) { + return c.client.SignURL(path, expires) +} diff --git a/vendor/github.com/joyent/gosign/LICENSE b/vendor/github.com/joyent/gosign/LICENSE new file mode 100755 index 00000000000..14e2f777f6c --- /dev/null +++ b/vendor/github.com/joyent/gosign/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/joyent/gosign/auth/auth.go b/vendor/github.com/joyent/gosign/auth/auth.go
new file mode 100644
index 00000000000..018e24c66bb
--- /dev/null
+++ b/vendor/github.com/joyent/gosign/auth/auth.go
@@ -0,0 +1,132 @@
+//
+// gosign - Go HTTP signing library for the Joyent Public Cloud and Joyent Manta
+//
+//
+// Copyright (c) 2013 Joyent Inc.
+//
+// Written by Daniele Stroppa
+//
+
+package auth
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const (
+	// Authorization Headers
+	SdcSignature   = "Signature keyId=\"/%s/keys/%s\",algorithm=\"%s\" %s"
+	MantaSignature = "Signature keyId=\"/%s/keys/%s\",algorithm=\"%s\",signature=\"%s\""
+)
+
+type Endpoint struct {
+	URL string
+}
+
+type Auth struct {
+	User       string
+	PrivateKey PrivateKey
+	Algorithm  string
+}
+
+type Credentials struct {
+	UserAuthentication *Auth
+	SdcKeyId           string
+	SdcEndpoint        Endpoint
+	MantaKeyId         string
+	MantaEndpoint      Endpoint
+}
+
+type PrivateKey struct {
+	key *rsa.PrivateKey
+}
+
+// NewAuth creates a new Auth.
+func NewAuth(user, privateKey, algorithm string) (*Auth, error) {
+	block, _ := pem.Decode([]byte(privateKey))
+	if block == nil {
+		return nil, fmt.Errorf("invalid private key data: %s", privateKey)
+	}
+	rsakey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("An error occurred while parsing the key: %s", err)
+	}
+	return &Auth{user, PrivateKey{rsakey}, algorithm}, nil
+}
+
+// CreateAuthorizationHeader returns the Authorization header for the given request.
+func CreateAuthorizationHeader(headers http.Header, credentials *Credentials, isMantaRequest bool) (string, error) {
+	if isMantaRequest {
+		signature, err := GetSignature(credentials.UserAuthentication, "date: "+headers.Get("Date"))
+		if err != nil {
+			return "", err
+		}
+		return fmt.Sprintf(MantaSignature, credentials.UserAuthentication.User, credentials.MantaKeyId,
+			credentials.UserAuthentication.Algorithm, signature), nil
+	}
+	signature, err := GetSignature(credentials.UserAuthentication, headers.Get("Date"))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf(SdcSignature, credentials.UserAuthentication.User, credentials.SdcKeyId,
+		credentials.UserAuthentication.Algorithm, signature), nil
+}
+
+// GetSignature signs the given string according to http://apidocs.joyent.com/cloudapi/#issuing-requests
+// and http://apidocs.joyent.com/manta/api.html#authentication.
+func GetSignature(auth *Auth, signing string) (string, error) { + hashFunc := getHashFunction(auth.Algorithm) + hash := hashFunc.New() + hash.Write([]byte(signing)) + + digest := hash.Sum(nil) + + signed, err := rsa.SignPKCS1v15(rand.Reader, auth.PrivateKey.key, hashFunc, digest) + if err != nil { + return "", fmt.Errorf("An error occurred while signing the key: %s", err) + } + + return base64.StdEncoding.EncodeToString(signed), nil +} + +// Helper method to get the Hash function based on the algorithm +func getHashFunction(algorithm string) (hashFunc crypto.Hash) { + switch strings.ToLower(algorithm) { + case "rsa-sha1": + hashFunc = crypto.SHA1 + case "rsa-sha224", "rsa-sha256": + hashFunc = crypto.SHA256 + case "rsa-sha384", "rsa-sha512": + hashFunc = crypto.SHA512 + default: + hashFunc = crypto.SHA256 + } + return +} + +func (cred *Credentials) Region() string { + parsedUrl, err := url.Parse(cred.SdcEndpoint.URL) + if err != nil { + // Bogus URL - no region. + return "" + } + if strings.HasPrefix(parsedUrl.Host, "localhost") || strings.HasPrefix(parsedUrl.Host, "127.0.0.1") { + return "some-region" + } + + host := parsedUrl.Host + firstDotIdx := strings.Index(host, ".") + if firstDotIdx >= 0 { + return host[:firstDotIdx] + } + return host +} diff --git a/vendor/github.com/lusis/go-artifactory/LICENSE b/vendor/github.com/lusis/go-artifactory/LICENSE new file mode 100644 index 00000000000..7c588406b01 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/LICENSE @@ -0,0 +1,15 @@ +Apache License, Version 2.0 + +Copyright (c) 2016 John E. Vincent + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
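
A hedged sketch of how the signing helpers in gosign's auth.go above attach an Authorization header to an outgoing Manta request; the credentials value is assumed to be built elsewhere (for instance by the jpc environment helpers earlier in this diff):

package signexample

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/joyent/gosign/auth" // assumed import path per the vendor layout
)

func signRequest(req *http.Request, creds *auth.Credentials) {
	// The signature covers the Date header, so set it first.
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	// true selects the Manta flavour, which signs "date: <value>".
	header, err := auth.CreateAuthorizationHeader(req.Header, creds, true)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", header)
	fmt.Println(req.Header.Get("Authorization"))
}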
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go
new file mode 100644
index 00000000000..a60755a8b0a
--- /dev/null
+++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/api.go
@@ -0,0 +1 @@
+package artifactory
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go
new file mode 100644
index 00000000000..a60755a8b0a
--- /dev/null
+++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/archive.go
@@ -0,0 +1 @@
+package artifactory
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go
new file mode 100644
index 00000000000..67ae1fda53f
--- /dev/null
+++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/artifact.go
@@ -0,0 +1,67 @@
+package artifactory
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+type FileInfo struct {
+	Uri          string `json:"uri"`
+	DownloadUri  string `json:"downloadUri"`
+	Repo         string `json:"repo"`
+	Path         string `json:"path"`
+	RemoteUrl    string `json:"remoteUrl,omitempty"`
+	Created      string `json:"created"`
+	CreatedBy    string `json:"createdBy"`
+	LastModified string `json:"lastModified"`
+	ModifiedBy   string `json:"modifiedBy"`
+	MimeType     string `json:"mimeType"`
+	Size         string `json:"size"`
+	Checksums    struct {
+		SHA1 string `json:"sha1"`
+		MD5  string `json:"md5"`
+	} `json:"checksums"`
+	OriginalChecksums struct {
+		SHA1 string `json:"sha1"`
+		MD5  string `json:"md5"`
+	} `json:"originalChecksums,omitempty"`
+}
+
+func (c *ArtifactoryClient) DeployArtifact(repoKey string, filename string, path string, properties map[string]string) (CreatedStorageItem, error) {
+	var res CreatedStorageItem
+	var fileProps []string
+	var finalUrl string
+	finalUrl = "/" + repoKey + "/"
+	if path != "" {
+		finalUrl = finalUrl + path
+	}
+	baseFile := filepath.Base(filename)
+	finalUrl = finalUrl + "/" + baseFile
+	if len(properties) > 0 {
+		finalUrl = finalUrl + ";"
+		for k, v := range properties {
+			fileProps = append(fileProps, k+"="+v)
+		}
+		finalUrl = finalUrl + strings.Join(fileProps, ";")
+	}
+	data, err := os.Open(filename)
+	if err != nil {
+		return res, err
+	}
+	defer data.Close()
+	b, err := ioutil.ReadAll(data)
+	if err != nil {
+		return res, err
+	}
+	d, err := c.Put(finalUrl, string(b), make(map[string]string))
+	if err != nil {
+		return res, err
+	}
+	e := json.Unmarshal(d, &res)
+	if e != nil {
+		return res, e
+	}
+	return res, nil
+}
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go
new file mode 100644
index 00000000000..a60755a8b0a
--- /dev/null
+++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/bintray.go
@@ -0,0 +1 @@
+package artifactory
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go
new file mode 100644
index 00000000000..a60755a8b0a
--- /dev/null
+++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/build.go
@@ -0,0 +1 @@
+package artifactory
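
A hedged usage sketch for DeployArtifact above: the repository key, file path, target path, and matrix properties (appended to the URL as ";k=v" pairs) are all placeholders:

package artexample

import (
	"fmt"
	"log"

	artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" // assumed import path per the vendor layout
)

func deployJar(c *artifactory.ArtifactoryClient) {
	props := map[string]string{"build.number": "42", "vcs.branch": "main"}
	item, err := c.DeployArtifact("libs-release-local", "./app-1.0.0.jar", "com/example/app/1.0.0", props)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deployed: %+v\n", item)
}

diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go
new file mode 100644
index 00000000000..e744c01f785
--- /dev/null
+++ 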
b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/client.go
@@ -0,0 +1,80 @@
+package artifactory
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"os"
+)
+
+type ClientConfig struct {
+	BaseURL    string
+	Username   string
+	Password   string
+	Token      string
+	AuthMethod string
+	VerifySSL  bool
+	Client     *http.Client
+	Transport  *http.Transport
+}
+
+type ArtifactoryClient struct {
+	Client    *http.Client
+	Config    *ClientConfig
+	Transport *http.Transport
+}
+
+func NewClient(config *ClientConfig) (c ArtifactoryClient) {
+	if config.Transport == nil {
+		config.Transport = new(http.Transport)
+	}
+	// Skip TLS verification only when VerifySSL is explicitly disabled.
+	config.Transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: !config.VerifySSL}
+	if config.Client == nil {
+		config.Client = new(http.Client)
+	}
+	config.Client.Transport = config.Transport
+	return ArtifactoryClient{Client: config.Client, Config: config}
+}
+
+func clientConfigFrom(from string) (c *ClientConfig) {
+	conf := ClientConfig{}
+	switch from {
+	case "environment":
+		if os.Getenv("ARTIFACTORY_URL") == "" {
+			fmt.Printf("You must set the environment variable ARTIFACTORY_URL\n")
+			os.Exit(1)
+		} else {
+			conf.BaseURL = os.Getenv("ARTIFACTORY_URL")
+		}
+		if os.Getenv("ARTIFACTORY_TOKEN") == "" {
+			if os.Getenv("ARTIFACTORY_USERNAME") == "" || os.Getenv("ARTIFACTORY_PASSWORD") == "" {
+				fmt.Printf("You must set the environment variables ARTIFACTORY_USERNAME/ARTIFACTORY_PASSWORD\n")
+				os.Exit(1)
+			} else {
+				conf.AuthMethod = "basic"
+			}
+		} else {
+			conf.AuthMethod = "token"
+		}
+	}
+	if conf.AuthMethod == "token" {
+		conf.Token = os.Getenv("ARTIFACTORY_TOKEN")
+	} else {
+		conf.Username = os.Getenv("ARTIFACTORY_USERNAME")
+		conf.Password = os.Getenv("ARTIFACTORY_PASSWORD")
+	}
+	return &conf
+}
+
+func NewClientFromEnv() (c ArtifactoryClient) {
+	config := clientConfigFrom("environment")
+	client := NewClient(config)
+	return client
+}
diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go
new file mode 100644
index 00000000000..a60c11229b9
--- /dev/null
+++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/groups.go
@@ -0,0 +1,61 @@
+package artifactory
+
+import (
+	"encoding/json"
+)
+
+type Group struct {
+	Name string `json:"name"`
+	Uri  string `json:"uri"`
+}
+
+type GroupDetails struct {
+	Name            string `json:"name,omitempty"`
+	Description     string `json:"description,omitempty"`
+	AutoJoin        bool   `json:"autoJoin,omitempty"`
+	Realm           string `json:"realm,omitempty"`
+	RealmAttributes string `json:"realmAttributes,omitempty"`
+}
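
A short sketch of the environment-driven construction above; the URL and token values are placeholders, and note that NewClientFromEnv exits the process when the variables are missing. GetGroups, defined just below, serves as the smoke test:

package artexample

import (
	"fmt"
	"log"
	"os"

	artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" // assumed import path per the vendor layout
)

func listGroupsFromEnv() {
	os.Setenv("ARTIFACTORY_URL", "https://artifactory.example.com/artifactory")
	os.Setenv("ARTIFACTORY_TOKEN", "hypothetical-api-token")
	client := artifactory.NewClientFromEnv()

	groups, err := client.GetGroups() // GET /api/security/groups
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range groups {
		fmt.Println(g.Name, g.Uri)
	}
}

+
+func (c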
*ArtifactoryClient) GetGroups() ([]Group, error) { + var res []Group + d, e := c.Get("/api/security/groups", make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) GetGroupDetails(u string) (GroupDetails, error) { + var res GroupDetails + d, e := c.Get("/api/security/groups/"+u, make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) CreateGroup(gname string, g GroupDetails) error { + j, jerr := json.Marshal(g) + if jerr != nil { + return jerr + } + o := make(map[string]string) + _, err := c.Put("/api/security/groups/"+gname, string(j), o) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go new file mode 100644 index 00000000000..95a38c0b7b7 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/http.go @@ -0,0 +1,103 @@ +package artifactory + +import ( + "bytes" + "crypto/sha1" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +func (c *ArtifactoryClient) Get(path string, options map[string]string) ([]byte, error) { + return c.makeRequest("GET", path, options, nil) +} + +func (c *ArtifactoryClient) Post(path string, data string, options map[string]string) ([]byte, error) { + body := strings.NewReader(data) + return c.makeRequest("POST", path, options, body) +} + +func (c *ArtifactoryClient) Put(path string, data string, options map[string]string) ([]byte, error) { + body := strings.NewReader(data) + return c.makeRequest("PUT", path, options, body) +} + +func (c *ArtifactoryClient) Delete(path string) error { + _, err := c.makeRequest("DELETE", path, make(map[string]string), nil) + if err != nil { + return err + } else { + return nil + } +} + +func (c *ArtifactoryClient) makeRequest(method string, path string, options map[string]string, body io.Reader) ([]byte, error) { + qs := url.Values{} + for q, p := range options { + qs.Add(q, p) + } + var base_req_path string + // swapped out legacy code below for simply trimming the trailing slash + //if c.Config.BaseURL[:len(c.Config.BaseURL)-1] == "/" { + // base_req_path = c.Config.BaseURL + path + //} else { + // base_req_path = c.Config.BaseURL + "/" + path + //} + base_req_path = strings.TrimSuffix(c.Config.BaseURL, "/") + path + u, err := url.Parse(base_req_path) + if err != nil { + var data bytes.Buffer + return data.Bytes(), err + } + if len(options) != 0 { + u.RawQuery = qs.Encode() + } + buf := new(bytes.Buffer) + if body != nil { + buf.ReadFrom(body) + } + req, _ := http.NewRequest(method, u.String(), bytes.NewReader(buf.Bytes())) + if body != nil { + h := sha1.New() + h.Write(buf.Bytes()) + chkSum := h.Sum(nil) + req.Header.Add("X-Checksum-Sha1", fmt.Sprintf("%x", chkSum)) + } + req.Header.Add("user-agent", "artifactory-go."+VERSION) + req.Header.Add("X-Result-Detail", "info, properties") + req.Header.Add("Accept", "application/json") + if c.Config.AuthMethod == "basic" { + req.SetBasicAuth(c.Config.Username, c.Config.Password) + } else { + req.Header.Add("X-JFrog-Art-Api", c.Config.Token) + } + r, err := c.Client.Do(req) + if err != nil { + var data bytes.Buffer + return data.Bytes(), err + } else { + defer 
r.Body.Close() + data, err := ioutil.ReadAll(r.Body) + if r.StatusCode < 200 || r.StatusCode > 299 { + var ej ErrorsJson + uerr := json.Unmarshal(data, &ej) + if uerr != nil { + emsg := fmt.Sprintf("Non-2xx code returned: %d. Message follows:\n%s", r.StatusCode, string(data)) + return data, errors.New(emsg) + } else { + var emsgs []string + for _, i := range ej.Errors { + emsgs = append(emsgs, i.Message) + } + return data, errors.New(strings.Join(emsgs, "\n")) + } + } else { + return data, err + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go new file mode 100644 index 00000000000..7b9cdb588d5 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/license.go @@ -0,0 +1,23 @@ +package artifactory + +import ( + "encoding/json" +) + +type LicenseInformation struct { + LicenseType string `json:"type"` + ValidThrough string `json:"validThrough"` + LicensedTo string `json:"licensedTo"` +} + +func (c *ArtifactoryClient) GetLicenseInformation() (LicenseInformation, error) { + o := make(map[string]string, 0) + var l LicenseInformation + d, e := c.Get("/api/system/license", o) + if e != nil { + return l, e + } else { + err := json.Unmarshal(d, &l) + return l, err + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go new file mode 100644 index 00000000000..2ad1f504237 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/mimetypes.go @@ -0,0 +1,11 @@ +package artifactory + +const LOCAL_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.LocalRepositoryConfiguration+json" +const REMOTE_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.RemoteRepositoryConfiguration+json" +const VIRTUAL_REPO_MIMETYPE string = "application/vnd.org.jfrog.artifactory.repositories.VirtualRepositoryConfiguration+json" +const USER_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.User+json" +const GROUP_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.Group+json" +const PERMISSION_TARGET_MIMETYPE string = "application/vnd.org.jfrog.artifactory.security.PermissionTarget+json" +const IMPORT_SETTINGS_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.ImportSettings+json" +const EXPORT_SETTIGNS_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.ExportSettings+json" +const SYSTEM_VERSION_MIMETYPE string = "application/vnd.org.jfrog.artifactory.system.Version+json" diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go new file mode 100644 index 00000000000..71ff71edc75 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/permissions_targets.go @@ -0,0 +1,53 @@ +package artifactory + +import ( + "encoding/json" +) + +type PermissionTarget struct { + Name string `json:"name"` + Uri string `json:"uri"` +} + +type PermissionTargetDetails struct { + Name string `json:"name,omitempty"` + IncludesPattern string `json:"includesPattern,omitempty"` + ExcludesPattern string `json:"excludesPattern,omitempty"` + Repositories []string `json:"repositories,omitempty"` + Principals Principals `json:"principals,omitempty"` +} + +type Principals struct { + Users map[string][]string `json:"users"` + Groups 
map[string][]string `json:"groups"` +} + +func (c *ArtifactoryClient) GetPermissionTargets() ([]PermissionTarget, error) { + var res []PermissionTarget + d, e := c.Get("/api/security/permissions", make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) GetPermissionTargetDetails(u string) (PermissionTargetDetails, error) { + var res PermissionTargetDetails + d, e := c.Get("/api/security/permissions/"+u, make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go new file mode 100644 index 00000000000..5a3ed8cd612 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/repos.go @@ -0,0 +1,151 @@ +package artifactory + +import ( + "encoding/json" + "fmt" +) + +type Repo struct { + Key string `json:"key"` + Rtype string `json:"type"` + Description string `json:"description,omitempty"` + Url string `json:"url,omitempty"` +} + +type RepoConfig interface { + MimeType() string +} + +type GenericRepoConfig struct { + Key string `json:"key,omitempty"` + RClass string `json:"rclass"` + PackageType string `json:"packageType,omitempty"` + Description string `json:"description,omitempty"` + Notes string `json:"notes,omitempty"` + IncludesPattern string `json:"includesPattern,omitempty"` + ExcludesPattern string `json:"excludesPattern,omitempty"` + HandleReleases bool `json:"handleReleases,omitempty"` + HandleSnapshots bool `json:"handleSnapshots,omitempty"` + MaxUniqueSnapshots int `json:"maxUniqueSnapshots,omitempty"` + SuppressPomConsistencyChecks bool `json:"supressPomConsistencyChecks,omitempty"` + BlackedOut bool `json:"blackedOut,omitempty"` + PropertySets []string `json:"propertySets,omitempty"` +} + +func (r GenericRepoConfig) MimeType() string { + return "" +} + +type LocalRepoConfig struct { + GenericRepoConfig + + LayoutRef string `json:"repoLayoutRef,omitempty"` + DebianTrivialLayout bool `json:"debianTrivialLayout,omitempty"` + ChecksumPolicyType string `json:"checksumPolicyType,omitempty"` + SnapshotVersionBehavior string `json:"snapshotVersionBehavior,omitempty"` + ArchiveBrowsingEnabled bool `json:"archiveBrowsingEnabled,omitempty"` + CalculateYumMetadata bool `json:"calculateYumMetadata,omitempty"` + YumRootDepth int `json:"yumRootDepth,omitempty"` +} + +func (r LocalRepoConfig) MimeType() string { + return LOCAL_REPO_MIMETYPE +} + +type RemoteRepoConfig struct { + GenericRepoConfig + + Url string `json:"url"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Proxy string `json:"proxy,omitempty"` + RemoteRepoChecksumPolicyType string `json:"remoteRepoChecksumPolicyType,omitempty"` + HardFail bool `json:"hardFail,omitempty"` + Offline bool `json:"offline,omitempty"` + StoreArtifactsLocally bool `json:"storeArtifactsLocally,omitempty"` + SocketTimeoutMillis int `json:"socketTimeoutMillis,omitempty"` + LocalAddress string `json:"localAddress,omitempty"` + RetrivialCachePeriodSecs int `json:"retrievalCachePeriodSecs,omitempty"` + FailedRetrievalCachePeriodSecs int `json:"failedRetrievalCachePeriodSecs,omitempty"` + MissedRetrievalCachePeriodSecs int `json:"missedRetrievalCachePeriodSecs,omitempty"` + 
UnusedArtifactsCleanupEnabled bool `json:"unusedArtifactCleanupEnabled,omitempty"` + UnusedArtifactsCleanupPeriodHours int `json:"unusedArtifactCleanupPeriodHours,omitempty"` + FetchJarsEagerly bool `json:"fetchJarsEagerly,omitempty"` + ShareConfiguration bool `json:"shareConfiguration,omitempty"` + SynchronizeProperties bool `json:"synchronizeProperties,omitempty"` + AllowAnyHostAuth bool `json:"allowAnyHostAuth,omitempty"` + EnableCookieManagement bool `json:"enableCookieManagement,omitempty"` + BowerRegistryUrl string `json:"bowerRegistryUrl,omitempty"` + VcsType string `json:"vcsType,omitempty"` + VcsGitProvider string `json:"vcsGitProvider,omitempty"` + VcsGitDownloader string `json:"vcsGitDownloader,omitempty"` +} + +func (r RemoteRepoConfig) MimeType() string { + return REMOTE_REPO_MIMETYPE +} + +type VirtualRepoConfig struct { + GenericRepoConfig + + Repositories []string `json:"repositories"` + DebianTrivialLayout bool `json:"debianTrivialLayout,omitempty"` + ArtifactoryRequestsCanRetrieveRemoteArtifacts bool `json:"artifactoryRequestsCanRetrieveRemoteArtifacts,omitempty"` + KeyPair string `json:"keyPair,omitempty"` + PomRepositoryReferenceCleanupPolicy string `json:"pomRepositoryReferenceCleanupPolicy,omitempty"` +} + +func (r VirtualRepoConfig) MimeType() string { + return VIRTUAL_REPO_MIMETYPE +} + +func (client *ArtifactoryClient) GetRepos(rtype string) ([]Repo, error) { + o := make(map[string]string, 0) + if rtype != "all" { + o["type"] = rtype + } + var dat []Repo + d, e := client.Get("/api/repositories", o) + if e != nil { + return dat, e + } else { + err := json.Unmarshal(d, &dat) + if err != nil { + return dat, err + } else { + return dat, e + } + } +} + +func (client *ArtifactoryClient) GetRepo(key string) (RepoConfig, error) { + o := make(map[string]string, 0) + dat := new(GenericRepoConfig) + d, e := client.Get("/api/repositories/"+key, o) + if e != nil { + return *dat, e + } else { + err := json.Unmarshal(d, &dat) + if err != nil { + return *dat, err + } else { + switch dat.RClass { + case "local": + var cdat LocalRepoConfig + _ = json.Unmarshal(d, &cdat) + return cdat, nil + case "remote": + var cdat RemoteRepoConfig + _ = json.Unmarshal(d, &cdat) + return cdat, nil + case "virtual": + var cdat VirtualRepoConfig + _ = json.Unmarshal(d, &cdat) + return cdat, nil + default: + fmt.Printf("fallthrough to default\n") + return dat, nil + } + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go new file mode 100644 index 00000000000..20a41110716 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/responses.go @@ -0,0 +1,9 @@ +package artifactory + +type GavcSearchResults struct { + Results []FileInfo `json:"results"` +} + +type Uri struct { + Uri string `json:"uri,omitempty"` +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go new file mode 100644 index 00000000000..8c986d78f0e --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/search.go @@ -0,0 +1,47 @@ +package artifactory + +import ( + "encoding/json" + "strings" +) + +type Gavc struct { + GroupID string + ArtifactID string + Version string + Classifier string + Repos []string +} + +func (c *ArtifactoryClient) GAVCSearch(coords *Gavc) (files []FileInfo, e error) { + url := "/api/search/gavc" + params := make(map[string]string) + if coords.GroupID != "" { + params["g"] = coords.GroupID + } + if coords.ArtifactID != "" { + params["a"] = coords.ArtifactID + } + if coords.Version != "" { + params["v"] = coords.Version + } + if coords.Classifier != "" { + params["c"] = coords.Classifier + } + if len(coords.Repos) > 0 { + params["repos"] = strings.Join(coords.Repos, ",") + } + d, err := c.Get(url, params) + if err != nil { + return files, err + } else { + var dat GavcSearchResults + err := json.Unmarshal(d, &dat) + if err != nil { + return files, err + } else { + files = dat.Results + return files, nil + } + } +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go new file mode 100644 index 00000000000..f741309612d --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/security.go @@ -0,0 +1,6 @@ +package artifactory + +func (c *ArtifactoryClient) GetSystemSecurityConfiguration() (s string, e error) { + d, e := c.Get("/api/system/security", make(map[string]string)) + return string(d), e +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go new file mode 100644 index 00000000000..b7967e11f25 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/storage.go @@ -0,0 +1,18 @@ +package artifactory + +type CreatedStorageItem struct { + URI string `json:"uri"` + DownloadURI string `json:"downloadUri"` + Repo string `json:"repo"` + Created string `json:"created"` + CreatedBy string `json:"createdBy"` + Size string `json:"size"` + MimeType string `json:"mimeType"` + Checksums ArtifactChecksums `json:"checksums"` + OriginalChecksums ArtifactChecksums `json:"originalChecksums"` +} + +type ArtifactChecksums struct { + MD5 string `json:"md5"` + SHA1 string `json:"sha1"` +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go new file mode 100644 index 00000000000..d33c29c65f4 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/system.go @@ -0,0 +1,6 @@ +package artifactory + +func (c *ArtifactoryClient) GetGeneralConfiguration() (s string, e error) { + d, e := c.Get("/api/system/configuration", make(map[string]string)) + return string(d), e +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go new file mode 100644 index 00000000000..9caf6adf743 --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/users.go @@ -0,0 +1,83 @@ +package artifactory + +import ( + "encoding/json" + "errors" +) + +type User struct { + Name string `json:"name"` + Uri string `json:"uri"` +} + +type UserDetails struct { + Name string `json:"name,omitempty"` + Email string `json:"email"` + Password string `json:"password"` + Admin bool `json:"admin,omitempty"` + ProfileUpdatable bool `json:"profileUpdatable,omitempty"` + InternalPasswordDisabled bool `json:"internalPasswordDisabled,omitempty"` + LastLoggedIn string `json:"lastLoggedIn,omitempty"` + Realm string `json:"realm,omitempty"` + Groups []string `json:"groups,omitempty"` +} + +func (c *ArtifactoryClient) GetUsers() ([]User, error) { + var res []User + d, e := c.Get("/api/security/users", make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil {
+ return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) GetUserDetails(u string) (UserDetails, error) { + var res UserDetails + d, e := c.Get("/api/security/users/"+u, make(map[string]string)) + if e != nil { + return res, e + } else { + err := json.Unmarshal(d, &res) + if err != nil { + return res, err + } else { + return res, e + } + } +} + +func (c *ArtifactoryClient) CreateUser(uname string, u UserDetails) error { + if u.Email == "" || u.Password == "" { + return errors.New("Email and password are required to create users") + } + j, jerr := json.Marshal(u) + if jerr != nil { + return jerr + } + o := make(map[string]string) + _, err := c.Put("/api/security/users/"+uname, string(j), o) + if err != nil { + return err + } + return nil +} + +func (c *ArtifactoryClient) DeleteUser(uname string) error { + err := c.Delete("/api/security/users/" + uname) + if err != nil { + return err + } else { + return nil + } +} + +func (c *ArtifactoryClient) GetUserEncryptedPassword() (s string, err error) { + d, err := c.Get("/api/security/encryptedPassword", make(map[string]string)) + return string(d), err +} diff --git a/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go new file mode 100644 index 00000000000..bca14077e1e --- /dev/null +++ b/vendor/github.com/lusis/go-artifactory/src/artifactory.v401/version.go @@ -0,0 +1,3 @@ +package artifactory + +const VERSION = "4.0.1" diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 00000000000..a3866a291fd --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 00000000000..26781bbae88 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually.
This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 00000000000..204afb42005 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 00000000000..31b42cadf8d --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 00000000000..22985159044 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 00000000000..60ae3117008 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. 
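The naive behavior described in the README above is easy to see in practice. The following is a minimal sketch (not part of the vendored package) that exercises `wordwrap.WrapString` — whose signature, `WrapString(s string, lim uint) string`, appears in wordwrap.go below — assuming the package is importable as `github.com/mitchellh/go-wordwrap` per the README's install instructions:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/go-wordwrap"
)

func main() {
	// Breaks only at whitespace: the README's own example, which
	// prints "foo", "bar", and "baz" on separate lines.
	fmt.Println(wordwrap.WrapString("foo bar baz", 3))

	// Explicit linebreaks in the input are preserved as-is.
	fmt.Println(wordwrap.WrapString("foo\nbar baz", 10))

	// A single word longer than the limit is never split, so the output
	// can exceed the requested width (the pathological case noted in
	// wordwrap.go's doc comment).
	fmt.Println(wordwrap.WrapString("antidisestablishmentarianism", 5))
}
```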
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 00000000000..ac67205bc2e --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE b/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE new file mode 100644 index 00000000000..a612ad9813b --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go new file mode 100644 index 00000000000..f7ae3ea54d1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/compute_instance_v2_networking.go @@ -0,0 +1,488 @@ +// This set of code handles all functions required to configure networking +// on an openstack_compute_instance_v2 resource. +// +// This is a complicated task because it's not possible to obtain all +// information in a single API call. In fact, it even traverses multiple +// OpenStack services. +// +// The end result, from the user's point of view, is a structured set of +// understandable network information within the instance resource. +package openstack + +import ( + "fmt" + "log" + "os" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/hashicorp/terraform/helper/schema" +) + +// InstanceNIC is a structured representation of a Gophercloud servers.Server +// virtual NIC. +type InstanceNIC struct { + FixedIPv4 string + FixedIPv6 string + MAC string +} + +// InstanceAddresses is a collection of InstanceNICs, grouped by the +// network name. An instance/server could have multiple NICs on the same +// network. +type InstanceAddresses struct { + NetworkName string + InstanceNICs []InstanceNIC +} + +// InstanceNetwork represents a collection of network information that a +// Terraform instance needs to satisfy all network information requirements. +type InstanceNetwork struct { + UUID string + Name string + Port string + FixedIP string + AccessNetwork bool +} + +// getAllInstanceNetworks loops through the networks defined in the Terraform +// configuration and structures that information into something standard that +// can be consumed by both OpenStack and Terraform. +// +// This would be simple, except we have to ensure both the network name and +// network ID have been determined. This isn't just for the convenience of a +// user specifying a human-readable network name, but the network information +// returned by an OpenStack instance only has the network name set! So if a +// user specified a network ID, there's no way to correlate it to the instance +// unless we know both the name and ID.
+// +// Not only that, but we have to account for two OpenStack network services +// running: nova-network (legacy) and Neutron (current). +// +// In addition, if a port was specified, not all of the port information +// will be displayed, such as multiple fixed and floating IPs. This resource +// isn't currently configured for that type of flexibility. It's better to +// reference the actual port resource itself. +// +// So, let's begin the journey. +func getAllInstanceNetworks(d *schema.ResourceData, meta interface{}) ([]InstanceNetwork, error) { + var instanceNetworks []InstanceNetwork + + networks := d.Get("network").([]interface{}) + for _, v := range networks { + network := v.(map[string]interface{}) + networkID := network["uuid"].(string) + networkName := network["name"].(string) + portID := network["port"].(string) + + if networkID == "" && networkName == "" && portID == "" { + return nil, fmt.Errorf( + "At least one of network.uuid, network.name, or network.port must be set.") + } + + // If a user specified both an ID and name, that makes things easy + // since both name and ID are already satisfied. No need to query + // further. + if networkID != "" && networkName != "" { + v := InstanceNetwork{ + UUID: networkID, + Name: networkName, + Port: portID, + FixedIP: network["fixed_ip_v4"].(string), + AccessNetwork: network["access_network"].(bool), + } + instanceNetworks = append(instanceNetworks, v) + continue + } + + // But if at least one of name or ID was missing, we have to query + // for that other piece. + // + // Priority is given to a port since a network ID or name usually isn't + // specified when using a port. + // + // Next priority is given to the network ID since it's guaranteed to be + // an exact match. + queryType := "name" + queryTerm := networkName + if networkID != "" { + queryType = "id" + queryTerm = networkID + } + if portID != "" { + queryType = "port" + queryTerm = portID + } + + networkInfo, err := getInstanceNetworkInfo(d, meta, queryType, queryTerm) + if err != nil { + return nil, err + } + + v := InstanceNetwork{ + UUID: networkInfo["uuid"].(string), + Name: networkInfo["name"].(string), + Port: portID, + FixedIP: network["fixed_ip_v4"].(string), + AccessNetwork: network["access_network"].(bool), + } + + instanceNetworks = append(instanceNetworks, v) + } + + log.Printf("[DEBUG] getAllInstanceNetworks: %#v", instanceNetworks) + return instanceNetworks, nil +} + +// getInstanceNetworkInfo will query for network information in order to make +// an accurate determination of a network's name and a network's ID. +// +// We will try to first query the Neutron network service and fall back to the +// legacy nova-network service if that fails. +// +// If OS_NOVA_NETWORK is set, query nova-network even if Neutron is available. +// This is to be able to explicitly test the nova-network API. 
+func getInstanceNetworkInfo( + d *schema.ResourceData, meta interface{}, queryType, queryTerm string) (map[string]interface{}, error) { + + config := meta.(*Config) + + if _, ok := os.LookupEnv("OS_NOVA_NETWORK"); !ok { + networkClient, err := config.networkingV2Client(GetRegion(d, config)) + if err == nil { + networkInfo, err := getInstanceNetworkInfoNeutron(networkClient, queryType, queryTerm) + if err != nil { + return nil, fmt.Errorf("Error trying to get network information from the Network API: %s", err) + } + + return networkInfo, nil + } + } + + log.Printf("[DEBUG] Unable to obtain a network client") + + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return nil, fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + networkInfo, err := getInstanceNetworkInfoNovaNet(computeClient, queryType, queryTerm) + if err != nil { + return nil, fmt.Errorf("Error trying to get network information from the Nova API: %s", err) + } + + return networkInfo, nil +} + +// getInstanceNetworkInfoNovaNet will query the os-tenant-networks API for +// the network information. +func getInstanceNetworkInfoNovaNet( + client *gophercloud.ServiceClient, queryType, queryTerm string) (map[string]interface{}, error) { + + // If somehow a port ended up here, we should just error out. + if queryType == "port" { + return nil, fmt.Errorf( + "Unable to query a port (%s) using the Nova API", queryTerm) + } + + allPages, err := tenantnetworks.List(client).AllPages() + if err != nil { + return nil, fmt.Errorf( + "An error occurred while querying the Nova API for network information: %s", err) + } + + networkList, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil { + return nil, fmt.Errorf( + "An error occurred while querying the Nova API for network information: %s", err) + } + + var networkFound bool + var network tenantnetworks.Network + + for _, v := range networkList { + if queryType == "id" && v.ID == queryTerm { + networkFound = true + network = v + break + } + + if queryType == "name" && v.Name == queryTerm { + networkFound = true + network = v + break + } + } + + if networkFound { + v := map[string]interface{}{ + "uuid": network.ID, + "name": network.Name, + } + + log.Printf("[DEBUG] getInstanceNetworkInfoNovaNet: %#v", v) + return v, nil + } + + return nil, fmt.Errorf("Could not find any matching network for %s %s", queryType, queryTerm) +} + +// getInstanceNetworkInfoNeutron will query the neutron API for the network +// information. +func getInstanceNetworkInfoNeutron( + client *gophercloud.ServiceClient, queryType, queryTerm string) (map[string]interface{}, error) { + + // If a port was specified, use it to look up the network ID + // and then query the network as if a network ID was originally used.
+ if queryType == "port" { + listOpts := ports.ListOpts{ + ID: queryTerm, + } + allPages, err := ports.List(client, listOpts).AllPages() + if err != nil { + return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) + } + + allPorts, err := ports.ExtractPorts(allPages) + if err != nil { + return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) + } + + var port ports.Port + switch len(allPorts) { + case 0: + return nil, fmt.Errorf("Could not find any matching port for %s %s", queryType, queryTerm) + case 1: + port = allPorts[0] + default: + return nil, fmt.Errorf("More than one port found for %s %s", queryType, queryTerm) + } + + queryType = "id" + queryTerm = port.NetworkID + } + + listOpts := networks.ListOpts{ + Status: "ACTIVE", + } + + switch queryType { + case "name": + listOpts.Name = queryTerm + default: + listOpts.ID = queryTerm + } + + allPages, err := networks.List(client, listOpts).AllPages() + if err != nil { + return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) + } + + allNetworks, err := networks.ExtractNetworks(allPages) + if err != nil { + return nil, fmt.Errorf("Unable to retrieve networks from the Network API: %s", err) + } + + var network networks.Network + switch len(allNetworks) { + case 0: + return nil, fmt.Errorf("Could not find any matching network for %s %s", queryType, queryTerm) + case 1: + network = allNetworks[0] + default: + return nil, fmt.Errorf("More than one network found for %s %s", queryType, queryTerm) + } + + v := map[string]interface{}{ + "uuid": network.ID, + "name": network.Name, + } + + log.Printf("[DEBUG] getInstanceNetworkInfoNeutron: %#v", v) + return v, nil +} + +// getInstanceAddresses parses a Gophercloud server.Server's Address field into +// a structured InstanceAddresses struct. +func getInstanceAddresses(addresses map[string]interface{}) []InstanceAddresses { + var allInstanceAddresses []InstanceAddresses + + for networkName, v := range addresses { + instanceAddresses := InstanceAddresses{ + NetworkName: networkName, + } + + for _, v := range v.([]interface{}) { + instanceNIC := InstanceNIC{} + var exists bool + + v := v.(map[string]interface{}) + if v, ok := v["OS-EXT-IPS-MAC:mac_addr"].(string); ok { + instanceNIC.MAC = v + } + + if v["OS-EXT-IPS:type"] == "fixed" { + switch v["version"].(float64) { + case 6: + instanceNIC.FixedIPv6 = fmt.Sprintf("[%s]", v["addr"].(string)) + default: + instanceNIC.FixedIPv4 = v["addr"].(string) + } + } + + // To associate IPv4 and IPv6 on the right NIC, + // key on the mac address and fill in the blanks. + for i, v := range instanceAddresses.InstanceNICs { + if v.MAC == instanceNIC.MAC { + exists = true + if instanceNIC.FixedIPv6 != "" { + instanceAddresses.InstanceNICs[i].FixedIPv6 = instanceNIC.FixedIPv6 + } + if instanceNIC.FixedIPv4 != "" { + instanceAddresses.InstanceNICs[i].FixedIPv4 = instanceNIC.FixedIPv4 + } + } + } + + if !exists { + instanceAddresses.InstanceNICs = append(instanceAddresses.InstanceNICs, instanceNIC) + } + } + + allInstanceAddresses = append(allInstanceAddresses, instanceAddresses) + } + + log.Printf("[DEBUG] Addresses: %#v", addresses) + log.Printf("[DEBUG] allInstanceAddresses: %#v", allInstanceAddresses) + + return allInstanceAddresses +} + +// expandInstanceNetworks takes network information found in []InstanceNetwork +// and builds a Gophercloud []servers.Network for use in creating an Instance. 
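+//
+// A minimal sketch (made-up values) of how the result feeds into a server
+// create request:
+//
+//	allInstanceNetworks := []InstanceNetwork{
+//		{UUID: "a1b2c3d4-5678-90ab-cdef-1234567890ab", FixedIP: "10.0.0.5"},
+//	}
+//	createOpts := servers.CreateOpts{
+//		Name:     "instance-1",
+//		Networks: expandInstanceNetworks(allInstanceNetworks),
+//	}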
+func expandInstanceNetworks(allInstanceNetworks []InstanceNetwork) []servers.Network { + var networks []servers.Network + for _, v := range allInstanceNetworks { + n := servers.Network{ + UUID: v.UUID, + Port: v.Port, + FixedIP: v.FixedIP, + } + networks = append(networks, n) + } + + return networks +} + +// flattenInstanceNetworks collects instance network information from different +// sources and aggregates it all together into a map array. +func flattenInstanceNetworks( + d *schema.ResourceData, meta interface{}) ([]map[string]interface{}, error) { + + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return nil, fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + server, err := servers.Get(computeClient, d.Id()).Extract() + if err != nil { + return nil, CheckDeleted(d, err, "server") + } + + allInstanceAddresses := getInstanceAddresses(server.Addresses) + allInstanceNetworks, err := getAllInstanceNetworks(d, meta) + if err != nil { + return nil, err + } + + networks := []map[string]interface{}{} + + // If there were no instance networks returned, this means that there + // was not a network specified in the Terraform configuration. When this + // happens, the instance will be launched on a "default" network, if one + // is available. If there isn't, the instance will fail to launch, so + // this is a safe assumption at this point. + if len(allInstanceNetworks) == 0 { + for _, instanceAddresses := range allInstanceAddresses { + for _, instanceNIC := range instanceAddresses.InstanceNICs { + v := map[string]interface{}{ + "name": instanceAddresses.NetworkName, + "fixed_ip_v4": instanceNIC.FixedIPv4, + "fixed_ip_v6": instanceNIC.FixedIPv6, + "mac": instanceNIC.MAC, + } + networks = append(networks, v) + } + } + + log.Printf("[DEBUG] flattenInstanceNetworks: %#v", networks) + return networks, nil + } + + // Loop through all networks and addresses, merge relevant address details. + for _, instanceNetwork := range allInstanceNetworks { + for _, instanceAddresses := range allInstanceAddresses { + if instanceNetwork.Name == instanceAddresses.NetworkName { + // Only use one NIC since it's possible the user defined another NIC + // on this same network in another Terraform network block. + instanceNIC := instanceAddresses.InstanceNICs[0] + copy(instanceAddresses.InstanceNICs, instanceAddresses.InstanceNICs[1:]) + v := map[string]interface{}{ + "name": instanceAddresses.NetworkName, + "fixed_ip_v4": instanceNIC.FixedIPv4, + "fixed_ip_v6": instanceNIC.FixedIPv6, + "mac": instanceNIC.MAC, + "uuid": instanceNetwork.UUID, + "port": instanceNetwork.Port, + "access_network": instanceNetwork.AccessNetwork, + } + networks = append(networks, v) + } + } + } + + log.Printf("[DEBUG] flattenInstanceNetworks: %#v", networks) + return networks, nil +} + +// getInstanceAccessAddresses determines the best IP address to communicate +// with the instance. It does this by looping through all networks and looking +// for a valid IP address. Priority is given to a network that was flagged as +// an access_network. +func getInstanceAccessAddresses( + d *schema.ResourceData, networks []map[string]interface{}) (string, string) { + + var hostv4, hostv6 string + + // Loop through all networks + // If the network has a valid fixed v4 or fixed v6 address + // and hostv4 or hostv6 is not set, set hostv4/hostv6. + // If the network is an "access_network" overwrite hostv4/hostv6. 
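+	//
+	// For example (hypothetical addresses), given these two networks:
+	//
+	//	{fixed_ip_v4: "10.0.0.5"}
+	//	{fixed_ip_v4: "192.168.1.5", access_network: true}
+	//
+	// hostv4 is first set to 10.0.0.5 and then overwritten by 192.168.1.5
+	// because the second network is flagged as the access network.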
+ for _, n := range networks { + var accessNetwork bool + + if an, ok := n["access_network"].(bool); ok && an { + accessNetwork = true + } + + if fixedIPv4, ok := n["fixed_ip_v4"].(string); ok && fixedIPv4 != "" { + if hostv4 == "" || accessNetwork { + hostv4 = fixedIPv4 + } + } + + if fixedIPv6, ok := n["fixed_ip_v6"].(string); ok && fixedIPv6 != "" { + if hostv6 == "" || accessNetwork { + hostv6 = fixedIPv6 + } + } + } + + log.Printf("[DEBUG] OpenStack Instance Network Access Addresses: %s, %s", hostv4, hostv6) + + return hostv4, hostv6 +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go new file mode 100644 index 00000000000..ffbe72c06b1 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/config.go @@ -0,0 +1,223 @@ +package openstack + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "log" + "net/http" + "os" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth" + "github.com/hashicorp/terraform/helper/pathorcontents" + "github.com/hashicorp/terraform/terraform" +) + +type Config struct { + CACertFile string + ClientCertFile string + ClientKeyFile string + DomainID string + DomainName string + EndpointType string + IdentityEndpoint string + Insecure bool + Password string + Region string + Swauth bool + TenantID string + TenantName string + Token string + Username string + UserID string + + OsClient *gophercloud.ProviderClient +} + +func (c *Config) LoadAndValidate() error { + validEndpoint := false + validEndpoints := []string{ + "internal", "internalURL", + "admin", "adminURL", + "public", "publicURL", + "", + } + + for _, endpoint := range validEndpoints { + if c.EndpointType == endpoint { + validEndpoint = true + } + } + + if !validEndpoint { + return fmt.Errorf("Invalid endpoint type provided") + } + + ao := gophercloud.AuthOptions{ + DomainID: c.DomainID, + DomainName: c.DomainName, + IdentityEndpoint: c.IdentityEndpoint, + Password: c.Password, + TenantID: c.TenantID, + TenantName: c.TenantName, + TokenID: c.Token, + Username: c.Username, + UserID: c.UserID, + } + + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return err + } + + // Set UserAgent + client.UserAgent.Prepend(terraform.UserAgentString()) + + config := &tls.Config{} + if c.CACertFile != "" { + caCert, _, err := pathorcontents.Read(c.CACertFile) + if err != nil { + return fmt.Errorf("Error reading CA Cert: %s", err) + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM([]byte(caCert)) + config.RootCAs = caCertPool + } + + if c.Insecure { + config.InsecureSkipVerify = true + } + + if c.ClientCertFile != "" && c.ClientKeyFile != "" { + clientCert, _, err := pathorcontents.Read(c.ClientCertFile) + if err != nil { + return fmt.Errorf("Error reading Client Cert: %s", err) + } + clientKey, _, err := pathorcontents.Read(c.ClientKeyFile) + if err != nil { + return fmt.Errorf("Error reading Client Key: %s", err) + } + + cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey)) + if err != nil { + return err + } + + config.Certificates = []tls.Certificate{cert} + config.BuildNameToCertificate() + } + + // if OS_DEBUG is set, log the requests and responses + var osDebug bool + if os.Getenv("OS_DEBUG") != "" { + osDebug = true + } + + transport := &http.Transport{Proxy: 
http.ProxyFromEnvironment, TLSClientConfig: config} + client.HTTPClient = http.Client{ + Transport: &LogRoundTripper{ + Rt: transport, + OsDebug: osDebug, + }, + } + + // If using Swift Authentication, there's no need to validate authentication normally. + if !c.Swauth { + err = openstack.Authenticate(client, ao) + if err != nil { + return err + } + } + + c.OsClient = client + + return nil +} + +func (c *Config) determineRegion(region string) string { + // If a resource-level region was not specified, and a provider-level region was set, + // use the provider-level region. + if region == "" && c.Region != "" { + region = c.Region + } + + log.Printf("[DEBUG] OpenStack Region is: %s", region) + return region +} + +func (c *Config) blockStorageV1Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewBlockStorageV1(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) blockStorageV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewBlockStorageV2(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) computeV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewComputeV2(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) dnsV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewDNSV2(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) identityV3Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewIdentityV3(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) imageV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewImageServiceV2(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) networkingV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewNetworkV2(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) objectStorageV1Client(region string) (*gophercloud.ServiceClient, error) { + // If Swift Authentication is being used, return a swauth client. 
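+	// As a sketch (illustrative values only), a swauth-enabled Config reuses
+	// the username/password pair as the swauth user/key and bypasses Keystone:
+	//
+	//	c := &Config{Swauth: true, Username: "swiftuser", Password: "swiftkey"}
+	//	client, err := c.objectStorageV1Client("RegionOne")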
+ if c.Swauth { + return swauth.NewObjectStorageV1(c.OsClient, swauth.AuthOpts{ + User: c.Username, + Key: c.Password, + }) + } + + return openstack.NewObjectStorageV1(c.OsClient, gophercloud.EndpointOpts{ + Region: c.determineRegion(region), + Availability: c.getEndpointType(), + }) +} + +func (c *Config) getEndpointType() gophercloud.Availability { + if c.EndpointType == "internal" || c.EndpointType == "internalURL" { + return gophercloud.AvailabilityInternal + } + if c.EndpointType == "admin" || c.EndpointType == "adminURL" { + return gophercloud.AvailabilityAdmin + } + return gophercloud.AvailabilityPublic +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go new file mode 100644 index 00000000000..ffc62eed542 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_images_image_v2.go @@ -0,0 +1,283 @@ +package openstack + +import ( + "fmt" + "log" + "sort" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceImagesImageV2() *schema.Resource { + return &schema.Resource{ + Read: dataSourceImagesImageV2Read, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "visibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "owner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "size_min": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "size_max": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "sort_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "name", + }, + + "sort_direction": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "asc", + ValidateFunc: dataSourceImagesImageV2SortDirection, + }, + + "tag": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "most_recent": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + + // Computed values + "container_format": { + Type: schema.TypeString, + Computed: true, + }, + + "disk_format": { + Type: schema.TypeString, + Computed: true, + }, + + "min_disk_gb": { + Type: schema.TypeInt, + Computed: true, + }, + + "min_ram_mb": { + Type: schema.TypeInt, + Computed: true, + }, + + "protected": { + Type: schema.TypeBool, + Computed: true, + }, + + "checksum": { + Type: schema.TypeString, + Computed: true, + }, + + "size_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + + "metadata": { + Type: schema.TypeMap, + Computed: true, + }, + + "updated_at": { + Type: schema.TypeString, + Computed: true, + }, + + "file": { + Type: schema.TypeString, + Computed: true, + }, + + "schema": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// dataSourceImagesImageV2Read performs the image lookup. 
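+//
+// As a sketch (hypothetical image name), a name-filtered lookup with
+// most_recent enabled boils down to:
+//
+//	listOpts := images.ListOpts{Name: "Ubuntu 16.04", Status: images.ImageStatusActive}
+//	// ...AllPages()/ExtractImages(), then mostRecentImage(allImages)
+//	// picks the newest match by CreatedAt.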
+func dataSourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	imageClient, err := config.imageV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack image client: %s", err)
+	}
+
+	visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string))
+
+	listOpts := images.ListOpts{
+		Name:       d.Get("name").(string),
+		Visibility: visibility,
+		Owner:      d.Get("owner").(string),
+		Status:     images.ImageStatusActive,
+		SizeMin:    int64(d.Get("size_min").(int)),
+		SizeMax:    int64(d.Get("size_max").(int)),
+		SortKey:    d.Get("sort_key").(string),
+		SortDir:    d.Get("sort_direction").(string),
+		Tag:        d.Get("tag").(string),
+	}
+
+	log.Printf("[DEBUG] List Options: %#v", listOpts)
+
+	var image images.Image
+	allPages, err := images.List(imageClient, listOpts).AllPages()
+	if err != nil {
+		return fmt.Errorf("Unable to query images: %s", err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		return fmt.Errorf("Unable to retrieve images: %s", err)
+	}
+
+	properties := d.Get("properties").(map[string]interface{})
+	imageProperties := resourceImagesImageV2ExpandProperties(properties)
+	if len(allImages) > 1 && len(imageProperties) > 0 {
+		var filteredImages []images.Image
+		for _, image := range allImages {
+			if len(image.Properties) > 0 {
+				match := true
+				for searchKey, searchValue := range imageProperties {
+					imageValue, ok := image.Properties[searchKey]
+					if !ok {
+						match = false
+						break
+					}
+
+					if searchValue != imageValue {
+						match = false
+						break
+					}
+				}
+
+				if match {
+					filteredImages = append(filteredImages, image)
+				}
+			}
+		}
+		allImages = filteredImages
+	}
+
+	if len(allImages) < 1 {
+		return fmt.Errorf("Your query returned no results. " +
+			"Please change your search criteria and try again.")
+	}
+
+	if len(allImages) > 1 {
+		recent := d.Get("most_recent").(bool)
+		log.Printf("[DEBUG] Multiple results found and `most_recent` is set to: %t", recent)
+		if recent {
+			image = mostRecentImage(allImages)
+		} else {
+			log.Printf("[DEBUG] Multiple results found: %#v", allImages)
+			return fmt.Errorf("Your query returned more than one result. Please try " +
+				"more specific search criteria, or set the `most_recent` attribute to true.")
+		}
+	} else {
+		image = allImages[0]
+	}
+
+	log.Printf("[DEBUG] Single Image found: %s", image.ID)
+	return dataSourceImagesImageV2Attributes(d, &image)
+}
+
+// dataSourceImagesImageV2Attributes populates the fields of an Image resource. 
+func dataSourceImagesImageV2Attributes(d *schema.ResourceData, image *images.Image) error { + log.Printf("[DEBUG] openstack_images_image details: %#v", image) + + d.SetId(image.ID) + d.Set("name", image.Name) + d.Set("tags", image.Tags) + d.Set("container_format", image.ContainerFormat) + d.Set("disk_format", image.DiskFormat) + d.Set("min_disk_gb", image.MinDiskGigabytes) + d.Set("min_ram_mb", image.MinRAMMegabytes) + d.Set("owner", image.Owner) + d.Set("protected", image.Protected) + d.Set("visibility", image.Visibility) + d.Set("checksum", image.Checksum) + d.Set("size_bytes", image.SizeBytes) + d.Set("metadata", image.Metadata) + d.Set("created_at", image.CreatedAt) + d.Set("updated_at", image.UpdatedAt) + d.Set("file", image.File) + d.Set("schema", image.Schema) + + return nil +} + +type imageSort []images.Image + +func (a imageSort) Len() int { return len(a) } +func (a imageSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a imageSort) Less(i, j int) bool { + itime := a[i].CreatedAt + jtime := a[j].CreatedAt + return itime.Unix() < jtime.Unix() +} + +// Returns the most recent Image out of a slice of images. +func mostRecentImage(images []images.Image) images.Image { + sortedImages := images + sort.Sort(imageSort(sortedImages)) + return sortedImages[len(sortedImages)-1] +} + +func dataSourceImagesImageV2SortDirection(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "asc" && value != "desc" { + err := fmt.Errorf("%s must be either asc or desc", k) + errors = append(errors, err) + } + return +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go new file mode 100644 index 00000000000..6377057b755 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_network_v2.go @@ -0,0 +1,125 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +func dataSourceNetworkingNetworkV2() *schema.Resource { + return &schema.Resource{ + Read: dataSourceNetworkingNetworkV2Read, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "network_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "status": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "matching_subnet_cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "OS_TENANT_ID", + "OS_PROJECT_ID", + }, ""), + Description: descriptions["tenant_id"], + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "shared": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + + 
if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	listOpts := networks.ListOpts{
+		ID:       d.Get("network_id").(string),
+		Name:     d.Get("name").(string),
+		TenantID: d.Get("tenant_id").(string),
+	}
+
+	if v, ok := d.GetOk("status"); ok {
+		listOpts.Status = v.(string)
+	}
+
+	pages, err := networks.List(networkingClient, listOpts).AllPages()
+	if err != nil {
+		return fmt.Errorf("Unable to list networks: %s", err)
+	}
+
+	allNetworks, err := networks.ExtractNetworks(pages)
+	if err != nil {
+		return fmt.Errorf("Unable to retrieve networks: %s", err)
+	}
+
+	var refinedNetworks []networks.Network
+	if cidr := d.Get("matching_subnet_cidr").(string); cidr != "" {
+		for _, n := range allNetworks {
+			for _, s := range n.Subnets {
+				subnet, err := subnets.Get(networkingClient, s).Extract()
+				if err != nil {
+					if _, ok := err.(gophercloud.ErrDefault404); ok {
+						continue
+					}
+					return fmt.Errorf("Unable to retrieve network subnet: %s", err)
+				}
+				if cidr == subnet.CIDR {
+					refinedNetworks = append(refinedNetworks, n)
+				}
+			}
+		}
+	} else {
+		refinedNetworks = allNetworks
+	}
+
+	if len(refinedNetworks) < 1 {
+		return fmt.Errorf("Your query returned no results. " +
+			"Please change your search criteria and try again.")
+	}
+
+	if len(refinedNetworks) > 1 {
+		return fmt.Errorf("Your query returned more than one result." +
+			" Please try more specific search criteria.")
+	}
+
+	network := refinedNetworks[0]
+
+	log.Printf("[DEBUG] Retrieved Network %s: %+v", network.ID, network)
+	d.SetId(network.ID)
+
+	d.Set("name", network.Name)
+	d.Set("admin_state_up", strconv.FormatBool(network.AdminStateUp))
+	d.Set("shared", strconv.FormatBool(network.Shared))
+	d.Set("tenant_id", network.TenantID)
+	d.Set("region", GetRegion(d, config))
+
+	return nil
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go
new file mode 100644
index 00000000000..6f16782765d
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/data_source_openstack_networking_secgroup_v2.go
@@ -0,0 +1,76 @@
+package openstack
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceNetworkingSecGroupV2() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceNetworkingSecGroupV2Read,
+
+		Schema: map[string]*schema.Schema{
+			"region": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+			"secgroup_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	listOpts := groups.ListOpts{
+		ID:       d.Get("secgroup_id").(string),
+		Name:     d.Get("name").(string),
+		TenantID: d.Get("tenant_id").(string),
+	}
+
+	pages, err := groups.List(networkingClient, listOpts).AllPages()
+	if err != nil {
+		return fmt.Errorf("Unable to list security groups: %s", err)
+	}
+
+	allSecGroups, err := groups.ExtractGroups(pages)
+	if err != nil {
+		return fmt.Errorf("Unable to retrieve security groups: %s", err)
+	}
+
+	if len(allSecGroups) < 1 {
+		return fmt.Errorf("No Security Group found with name: %s", d.Get("name"))
+	}
+
+	if 
len(allSecGroups) > 1 { + return fmt.Errorf("More than one Security Group found with name: %s", d.Get("name")) + } + + secGroup := allSecGroups[0] + + log.Printf("[DEBUG] Retrieved Security Group %s: %+v", secGroup.ID, secGroup) + d.SetId(secGroup.ID) + + d.Set("name", secGroup.Name) + d.Set("description", secGroup.Description) + d.Set("tenant_id", secGroup.TenantID) + d.Set("region", GetRegion(d, config)) + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go new file mode 100644 index 00000000000..4769e768a4a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/lb_v2_shared.go @@ -0,0 +1,243 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func waitForLBV2Listener(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { + log.Printf("[DEBUG] Waiting for listener %s to become %s.", id, target) + + stateConf := &resource.StateChangeConf{ + Target: []string{target}, + Pending: pending, + Refresh: resourceLBV2ListenerRefreshFunc(networkingClient, id), + Timeout: timeout, + Delay: 5 * time.Second, + MinTimeout: 1 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + switch target { + case "DELETED": + return nil + default: + return fmt.Errorf("Error: listener %s not found: %s", id, err) + } + } + return fmt.Errorf("Error waiting for listener %s to become %s: %s", id, target, err) + } + + return nil +} + +func resourceLBV2ListenerRefreshFunc(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + listener, err := listeners.Get(networkingClient, id).Extract() + if err != nil { + return nil, "", err + } + + // The listener resource has no Status attribute, so a successful Get is the best we can do + return listener, "ACTIVE", nil + } +} + +func waitForLBV2LoadBalancer(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { + log.Printf("[DEBUG] Waiting for loadbalancer %s to become %s.", id, target) + + stateConf := &resource.StateChangeConf{ + Target: []string{target}, + Pending: pending, + Refresh: resourceLBV2LoadBalancerRefreshFunc(networkingClient, id), + Timeout: timeout, + Delay: 5 * time.Second, + MinTimeout: 1 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + switch target { + case "DELETED": + return nil + default: + return fmt.Errorf("Error: loadbalancer %s not found: %s", id, err) + } + } + return fmt.Errorf("Error waiting for loadbalancer %s to become %s: %s", id, target, err) + } + + return nil +} + +func resourceLBV2LoadBalancerRefreshFunc(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + return func() (interface{}, 
string, error) { + lb, err := loadbalancers.Get(networkingClient, id).Extract() + if err != nil { + return nil, "", err + } + + return lb, lb.ProvisioningStatus, nil + } +} + +func waitForLBV2Member(networkingClient *gophercloud.ServiceClient, poolID, memberID string, target string, pending []string, timeout time.Duration) error { + log.Printf("[DEBUG] Waiting for member %s to become %s.", memberID, target) + + stateConf := &resource.StateChangeConf{ + Target: []string{target}, + Pending: pending, + Refresh: resourceLBV2MemberRefreshFunc(networkingClient, poolID, memberID), + Timeout: timeout, + Delay: 5 * time.Second, + MinTimeout: 1 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + switch target { + case "DELETED": + return nil + default: + return fmt.Errorf("Error: member %s not found: %s", memberID, err) + } + } + return fmt.Errorf("Error waiting for member %s to become %s: %s", memberID, target, err) + } + + return nil +} + +func resourceLBV2MemberRefreshFunc(networkingClient *gophercloud.ServiceClient, poolID, memberID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + member, err := pools.GetMember(networkingClient, poolID, memberID).Extract() + if err != nil { + return nil, "", err + } + + // The member resource has no Status attribute, so a successful Get is the best we can do + return member, "ACTIVE", nil + } +} + +func waitForLBV2Monitor(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { + log.Printf("[DEBUG] Waiting for monitor %s to become %s.", id, target) + + stateConf := &resource.StateChangeConf{ + Target: []string{target}, + Pending: pending, + Refresh: resourceLBV2MonitorRefreshFunc(networkingClient, id), + Timeout: timeout, + Delay: 5 * time.Second, + MinTimeout: 1 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + switch target { + case "DELETED": + return nil + default: + return fmt.Errorf("Error: monitor %s not found: %s", id, err) + } + } + return fmt.Errorf("Error waiting for monitor %s to become %s: %s", id, target, err) + } + + return nil +} + +func resourceLBV2MonitorRefreshFunc(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + monitor, err := monitors.Get(networkingClient, id).Extract() + if err != nil { + return nil, "", err + } + + // The monitor resource has no Status attribute, so a successful Get is the best we can do + return monitor, "ACTIVE", nil + } +} + +func waitForLBV2Pool(networkingClient *gophercloud.ServiceClient, id string, target string, pending []string, timeout time.Duration) error { + log.Printf("[DEBUG] Waiting for pool %s to become %s.", id, target) + + stateConf := &resource.StateChangeConf{ + Target: []string{target}, + Pending: pending, + Refresh: resourceLBV2PoolRefreshFunc(networkingClient, id), + Timeout: timeout, + Delay: 5 * time.Second, + MinTimeout: 1 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + switch target { + case "DELETED": + return nil + default: + return fmt.Errorf("Error: pool %s not found: %s", id, err) + } + } + return fmt.Errorf("Error waiting for pool %s to become %s: %s", id, target, err) + } + + return nil +} + +func resourceLBV2PoolRefreshFunc(networkingClient *gophercloud.ServiceClient, 
poolID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + pool, err := pools.Get(networkingClient, poolID).Extract() + if err != nil { + return nil, "", err + } + + // The pool resource has no Status attribute, so a successful Get is the best we can do + return pool, "ACTIVE", nil + } +} + +func waitForLBV2viaPool(networkingClient *gophercloud.ServiceClient, id string, target string, timeout time.Duration) error { + pool, err := pools.Get(networkingClient, id).Extract() + if err != nil { + return err + } + + if pool.Loadbalancers != nil { + // each pool has an LB in Octavia lbaasv2 API + lbID := pool.Loadbalancers[0].ID + return waitForLBV2LoadBalancer(networkingClient, lbID, target, nil, timeout) + } + + if pool.Listeners != nil { + // each pool has a listener in Neutron lbaasv2 API + listenerID := pool.Listeners[0].ID + listener, err := listeners.Get(networkingClient, listenerID).Extract() + if err != nil { + return err + } + if listener.Loadbalancers != nil { + lbID := listener.Loadbalancers[0].ID + return waitForLBV2LoadBalancer(networkingClient, lbID, target, nil, timeout) + } + } + + // got a pool but no LB - this is wrong + return fmt.Errorf("No Load Balancer on pool %s", id) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go new file mode 100644 index 00000000000..1b50d24a96c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/provider.go @@ -0,0 +1,261 @@ +package openstack + +import ( + "github.com/hashicorp/terraform/helper/mutexkv" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// This is a global MutexKV for use within this plugin. +var osMutexKV = mutexkv.NewMutexKV() + +// Provider returns a schema.Provider for OpenStack. 
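+//
+// A typical plugin entry point wiring this up (an illustrative sketch; it
+// assumes the github.com/hashicorp/terraform/plugin package) would be:
+//
+//	func main() {
+//		plugin.Serve(&plugin.ServeOpts{
+//			ProviderFunc: openstack.Provider,
+//		})
+//	}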
+func Provider() terraform.ResourceProvider {
+	return &schema.Provider{
+		Schema: map[string]*schema.Schema{
+			"auth_url": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", nil),
+				Description: descriptions["auth_url"],
+			},
+
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: descriptions["region"],
+				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
+			},
+
+			"user_name": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""),
+				Description: descriptions["user_name"],
+			},
+
+			"user_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""),
+				Description: descriptions["user_id"],
+			},
+
+			"tenant_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_ID",
+					"OS_PROJECT_ID",
+				}, ""),
+				Description: descriptions["tenant_id"],
+			},
+
+			"tenant_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_TENANT_NAME",
+					"OS_PROJECT_NAME",
+				}, ""),
+				Description: descriptions["tenant_name"],
+			},
+
+			"password": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Sensitive:   true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""),
+				Description: descriptions["password"],
+			},
+
+			"token": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_TOKEN", ""),
+				Description: descriptions["token"],
+			},
+
+			"domain_id": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_USER_DOMAIN_ID",
+					"OS_PROJECT_DOMAIN_ID",
+					"OS_DOMAIN_ID",
+				}, ""),
+				Description: descriptions["domain_id"],
+			},
+
+			"domain_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
+					"OS_USER_DOMAIN_NAME",
+					"OS_PROJECT_DOMAIN_NAME",
+					"OS_DOMAIN_NAME",
+					"OS_DEFAULT_DOMAIN",
+				}, ""),
+				Description: descriptions["domain_name"],
+			},
+
+			"insecure": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", ""),
+				Description: descriptions["insecure"],
+			},
+
+			"endpoint_type": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""),
+			},
+
+			"cacert_file": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""),
+				Description: descriptions["cacert_file"],
+			},
+
+			"cert": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""),
+				Description: descriptions["cert"],
+			},
+
+			"key": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""),
+				Description: descriptions["key"],
+			},
+
+			"swauth": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("OS_SWAUTH", ""),
+				Description: descriptions["swauth"],
+			},
+		},
+
+		DataSourcesMap: map[string]*schema.Resource{
+			"openstack_images_image_v2":        dataSourceImagesImageV2(),
+			"openstack_networking_network_v2":  dataSourceNetworkingNetworkV2(),
+			"openstack_networking_secgroup_v2": dataSourceNetworkingSecGroupV2(),
+		},
+
+		ResourcesMap: map[string]*schema.Resource{
+			
"openstack_blockstorage_volume_v1": resourceBlockStorageVolumeV1(), + "openstack_blockstorage_volume_v2": resourceBlockStorageVolumeV2(), + "openstack_blockstorage_volume_attach_v2": resourceBlockStorageVolumeAttachV2(), + "openstack_compute_flavor_v2": resourceComputeFlavorV2(), + "openstack_compute_instance_v2": resourceComputeInstanceV2(), + "openstack_compute_keypair_v2": resourceComputeKeypairV2(), + "openstack_compute_secgroup_v2": resourceComputeSecGroupV2(), + "openstack_compute_servergroup_v2": resourceComputeServerGroupV2(), + "openstack_compute_floatingip_v2": resourceComputeFloatingIPV2(), + "openstack_compute_floatingip_associate_v2": resourceComputeFloatingIPAssociateV2(), + "openstack_compute_volume_attach_v2": resourceComputeVolumeAttachV2(), + "openstack_dns_recordset_v2": resourceDNSRecordSetV2(), + "openstack_dns_zone_v2": resourceDNSZoneV2(), + "openstack_fw_firewall_v1": resourceFWFirewallV1(), + "openstack_fw_policy_v1": resourceFWPolicyV1(), + "openstack_fw_rule_v1": resourceFWRuleV1(), + "openstack_identity_project_v3": resourceIdentityProjectV3(), + "openstack_identity_user_v3": resourceIdentityUserV3(), + "openstack_images_image_v2": resourceImagesImageV2(), + "openstack_lb_member_v1": resourceLBMemberV1(), + "openstack_lb_monitor_v1": resourceLBMonitorV1(), + "openstack_lb_pool_v1": resourceLBPoolV1(), + "openstack_lb_vip_v1": resourceLBVipV1(), + "openstack_lb_loadbalancer_v2": resourceLoadBalancerV2(), + "openstack_lb_listener_v2": resourceListenerV2(), + "openstack_lb_pool_v2": resourcePoolV2(), + "openstack_lb_member_v2": resourceMemberV2(), + "openstack_lb_monitor_v2": resourceMonitorV2(), + "openstack_networking_network_v2": resourceNetworkingNetworkV2(), + "openstack_networking_subnet_v2": resourceNetworkingSubnetV2(), + "openstack_networking_floatingip_v2": resourceNetworkingFloatingIPV2(), + "openstack_networking_port_v2": resourceNetworkingPortV2(), + "openstack_networking_router_v2": resourceNetworkingRouterV2(), + "openstack_networking_router_interface_v2": resourceNetworkingRouterInterfaceV2(), + "openstack_networking_router_route_v2": resourceNetworkingRouterRouteV2(), + "openstack_networking_secgroup_v2": resourceNetworkingSecGroupV2(), + "openstack_networking_secgroup_rule_v2": resourceNetworkingSecGroupRuleV2(), + "openstack_objectstorage_container_v1": resourceObjectStorageContainerV1(), + }, + + ConfigureFunc: configureProvider, + } +} + +var descriptions map[string]string + +func init() { + descriptions = map[string]string{ + "auth_url": "The Identity authentication URL.", + + "region": "The OpenStack region to connect to.", + + "user_name": "Username to login with.", + + "user_id": "User ID to login with.", + + "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "password": "Password to login with.", + + "token": "Authentication token to use as an alternative to username/password.", + + "domain_id": "The ID of the Domain to scope to (Identity v3).", + + "domain_name": "The name of the Domain to scope to (Identity v3).", + + "insecure": "Trust self-signed certificates.", + + "cacert_file": "A Custom CA certificate.", + + "endpoint_type": "The catalog endpoint type to use.", + + "cert": "A client certificate to authenticate with.", + + "key": "A client private key to authenticate with.", + + "swauth": "Use Swift's authentication system instead of Keystone. 
Only used for\n" + + "interaction with Swift.", + } +} + +func configureProvider(d *schema.ResourceData) (interface{}, error) { + config := Config{ + CACertFile: d.Get("cacert_file").(string), + ClientCertFile: d.Get("cert").(string), + ClientKeyFile: d.Get("key").(string), + DomainID: d.Get("domain_id").(string), + DomainName: d.Get("domain_name").(string), + EndpointType: d.Get("endpoint_type").(string), + IdentityEndpoint: d.Get("auth_url").(string), + Insecure: d.Get("insecure").(bool), + Password: d.Get("password").(string), + Region: d.Get("region").(string), + Swauth: d.Get("swauth").(bool), + Token: d.Get("token").(string), + TenantID: d.Get("tenant_id").(string), + TenantName: d.Get("tenant_name").(string), + Username: d.Get("user_name").(string), + UserID: d.Get("user_id").(string), + } + + if err := config.LoadAndValidate(); err != nil { + return nil, err + } + + return &config, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go new file mode 100644 index 00000000000..b00491a16f5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_attach_v2.go @@ -0,0 +1,414 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBlockStorageVolumeAttachV2() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeAttachV2Create, + Read: resourceBlockStorageVolumeAttachV2Read, + Delete: resourceBlockStorageVolumeAttachV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Removed: "instance_id is no longer used in this resource", + }, + + "host_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "device": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "attach_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "ro" && value != "rw" { + errors = append(errors, fmt.Errorf( + "Only 'ro' and 'rw' are supported values for 'attach_mode'")) + } + return + }, + }, + + "initiator": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "multipath": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "os_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "platform": &schema.Schema{ + Type: 
schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"wwpn": &schema.Schema{
+				Type:     schema.TypeList,
+				Optional: true,
+				ForceNew: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+
+			"wwnn": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			// Volume attachment information
+			"data": &schema.Schema{
+				Type:      schema.TypeMap,
+				Computed:  true,
+				Sensitive: true,
+			},
+
+			"driver_volume_type": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"mount_point_base": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceBlockStorageVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	client, err := config.blockStorageV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
+	}
+
+	// Initialize the connection
+	volumeId := d.Get("volume_id").(string)
+	connOpts := &volumeactions.InitializeConnectionOpts{}
+	if v, ok := d.GetOk("host_name"); ok {
+		connOpts.Host = v.(string)
+	}
+
+	if v, ok := d.GetOk("multipath"); ok {
+		multipath := v.(bool)
+		connOpts.Multipath = &multipath
+	}
+
+	if v, ok := d.GetOk("ip_address"); ok {
+		connOpts.IP = v.(string)
+	}
+
+	if v, ok := d.GetOk("initiator"); ok {
+		connOpts.Initiator = v.(string)
+	}
+
+	if v, ok := d.GetOk("os_type"); ok {
+		connOpts.OSType = v.(string)
+	}
+
+	if v, ok := d.GetOk("platform"); ok {
+		connOpts.Platform = v.(string)
+	}
+
+	if v, ok := d.GetOk("wwnn"); ok {
+		connOpts.Wwnns = v.(string)
+	}
+
+	if v, ok := d.GetOk("wwpn"); ok {
+		var wwpns []string
+		for _, i := range v.([]interface{}) {
+			wwpns = append(wwpns, i.(string))
+		}
+
+		connOpts.Wwpns = wwpns
+	}
+
+	connInfo, err := volumeactions.InitializeConnection(client, volumeId, connOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Unable to create connection: %s", err)
+	}
+
+	// Only uncomment this when debugging since connInfo contains sensitive information.
+	// log.Printf("[DEBUG] Volume Connection for %s: %#v", volumeId, connInfo)
+
+	// Because this information is only returned upon creation,
+	// it must be set in Create.
+	if v, ok := connInfo["data"]; ok {
+		data := make(map[string]string)
+		for key, value := range v.(map[string]interface{}) {
+			if v, ok := value.(string); ok {
+				data[key] = v
+			}
+		}
+
+		d.Set("data", data)
+	}
+
+	if v, ok := connInfo["driver_volume_type"]; ok {
+		d.Set("driver_volume_type", v)
+	}
+
+	if v, ok := connInfo["mount_point_base"]; ok {
+		d.Set("mount_point_base", v)
+	}
+
+	// Once the connection has been made, tell Cinder to mark the volume as attached.
+	attachMode, err := blockStorageVolumeAttachV2AttachMode(d.Get("attach_mode").(string))
+	if err != nil {
+		return err
+	}
+
+	attachOpts := &volumeactions.AttachOpts{
+		HostName:   d.Get("host_name").(string),
+		MountPoint: d.Get("device").(string),
+		Mode:       attachMode,
+	}
+
+	log.Printf("[DEBUG] Attachment Options: %#v", attachOpts)
+
+	if err := volumeactions.Attach(client, volumeId, attachOpts).ExtractErr(); err != nil {
+		return err
+	}
+
+	// Wait for the volume to become available. 
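+	// (A note on the expected flow, not an API guarantee: Cinder reports the
+	// volume as "available" or "attaching" until the attach completes and the
+	// status becomes "in-use"; the StateChangeConf below polls
+	// VolumeV2StateRefreshFunc until that target state is reached.)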
+	log.Printf("[DEBUG] Waiting for volume (%s) to become available", volumeId)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"available", "attaching"},
+		Target:     []string{"in-use"},
+		Refresh:    VolumeV2StateRefreshFunc(client, volumeId),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for volume (%s) to become ready: %s", volumeId, err)
+	}
+
+	// Once the volume has been marked as attached,
+	// retrieve a fresh copy of it with all information now available.
+	volume, err := volumes.Get(client, volumeId).Extract()
+	if err != nil {
+		return err
+	}
+
+	// Search for the attachmentId
+	var attachmentId string
+	hostName := d.Get("host_name").(string)
+	for _, attachment := range volume.Attachments {
+		if hostName != "" && hostName == attachment.HostName {
+			attachmentId = attachment.AttachmentID
+		}
+	}
+
+	if attachmentId == "" {
+		return fmt.Errorf("Unable to determine attachment ID.")
+	}
+
+	// The ID must be a combination of the volume and attachment ID
+	// since a volume ID is required to retrieve an attachment ID.
+	id := fmt.Sprintf("%s/%s", volumeId, attachmentId)
+	d.SetId(id)
+
+	return resourceBlockStorageVolumeAttachV2Read(d, meta)
+}
+
+func resourceBlockStorageVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	client, err := config.blockStorageV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
+	}
+
+	volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id())
+	if err != nil {
+		return err
+	}
+
+	volume, err := volumes.Get(client, volumeId).Extract()
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Retrieved volume %s: %#v", d.Id(), volume)
+
+	var attachment volumes.Attachment
+	for _, v := range volume.Attachments {
+		if attachmentId == v.AttachmentID {
+			attachment = v
+		}
+	}
+
+	log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment)
+
+	return nil
+}
+
+func resourceBlockStorageVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	client, err := config.blockStorageV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
+	}
+
+	volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id())
+	if err != nil {
+		return err
+	}
+
+	// Terminate the connection
+	termOpts := &volumeactions.TerminateConnectionOpts{}
+	if v, ok := d.GetOk("host_name"); ok {
+		termOpts.Host = v.(string)
+	}
+
+	if v, ok := d.GetOk("multipath"); ok {
+		multipath := v.(bool)
+		termOpts.Multipath = &multipath
+	}
+
+	if v, ok := d.GetOk("ip_address"); ok {
+		termOpts.IP = v.(string)
+	}
+
+	if v, ok := d.GetOk("initiator"); ok {
+		termOpts.Initiator = v.(string)
+	}
+
+	if v, ok := d.GetOk("os_type"); ok {
+		termOpts.OSType = v.(string)
+	}
+
+	if v, ok := d.GetOk("platform"); ok {
+		termOpts.Platform = v.(string)
+	}
+
+	if v, ok := d.GetOk("wwnn"); ok {
+		termOpts.Wwnns = v.(string)
+	}
+
+	if v, ok := d.GetOk("wwpn"); ok {
+		var wwpns []string
+		for _, i := range v.([]interface{}) {
+			wwpns = append(wwpns, i.(string))
+		}
+
+		termOpts.Wwpns = wwpns
+	}
+
+	err = volumeactions.TerminateConnection(client, volumeId, termOpts).ExtractErr()
+	if err != nil {
+		return fmt.Errorf("Error terminating volume connection %s: %s", volumeId, err)
+	}
+
+	// Detach the volume
+	detachOpts := 
volumeactions.DetachOpts{ + AttachmentID: attachmentId, + } + + log.Printf("[DEBUG] Detachment Options: %#v", detachOpts) + + if err := volumeactions.Detach(client, volumeId, detachOpts).ExtractErr(); err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching", "detaching"}, + Target: []string{"available"}, + Refresh: VolumeV2StateRefreshFunc(client, volumeId), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for volume (%s) to become available: %s", volumeId, err) + } + + return nil +} + +func blockStorageVolumeAttachV2AttachMode(v string) (volumeactions.AttachMode, error) { + var attachMode volumeactions.AttachMode + var attachError error + switch v { + case "": + attachMode = "" + case "ro": + attachMode = volumeactions.ReadOnly + case "rw": + attachMode = volumeactions.ReadWrite + default: + attachError = fmt.Errorf("Invalid attach_mode specified") + } + + return attachMode, attachError +} + +func blockStorageVolumeAttachV2ParseId(id string) (string, string, error) { + parts := strings.Split(id, "/") + if len(parts) < 2 { + return "", "", fmt.Errorf("Unable to determine attachment ID") + } + + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go new file mode 100644 index 00000000000..7529f6341c7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v1.go @@ -0,0 +1,340 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBlockStorageVolumeV1() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeV1Create, + Read: resourceBlockStorageVolumeV1Read, + Update: resourceBlockStorageVolumeV1Update, + Delete: resourceBlockStorageVolumeV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + Computed: true, + }, + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_vol_id": &schema.Schema{ 
+ Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "attachment": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceVolumeAttachmentHash, + }, + }, + } +} + +func resourceBlockStorageVolumeV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + createOpts := &volumes.CreateOpts{ + Description: d.Get("description").(string), + AvailabilityZone: d.Get("availability_zone").(string), + Name: d.Get("name").(string), + Size: d.Get("size").(int), + SnapshotID: d.Get("snapshot_id").(string), + SourceVolID: d.Get("source_vol_id").(string), + ImageID: d.Get("image_id").(string), + VolumeType: d.Get("volume_type").(string), + Metadata: resourceContainerMetadataV2(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + v, err := volumes.Create(blockStorageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack volume: %s", err) + } + log.Printf("[INFO] Volume ID: %s", v.ID) + + // Wait for the volume to become available. + log.Printf( + "[DEBUG] Waiting for volume (%s) to become available", + v.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"downloading", "creating"}, + Target: []string{"available"}, + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become ready: %s", + v.ID, err) + } + + // Store the ID now + d.SetId(v.ID) + + return resourceBlockStorageVolumeV1Read(d, meta) +} + +func resourceBlockStorageVolumeV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) + + d.Set("size", v.Size) + d.Set("description", v.Description) + d.Set("availability_zone", v.AvailabilityZone) + d.Set("name", v.Name) + d.Set("snapshot_id", v.SnapshotID) + d.Set("source_vol_id", v.SourceVolID) + d.Set("volume_type", v.VolumeType) + d.Set("metadata", v.Metadata) + d.Set("region", GetRegion(d, config)) + + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = attachment["id"] + attachments[i]["instance_id"] = attachment["server_id"] + attachments[i]["device"] = attachment["device"] + log.Printf("[DEBUG] attachment: %v", attachment) + } + 
d.Set("attachment", attachments) + + return nil +} + +func resourceBlockStorageVolumeV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + updateOpts := volumes.UpdateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceVolumeMetadataV1(d) + } + + _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack volume: %s", err) + } + + return resourceBlockStorageVolumeV1Read(d, meta) +} + +func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + // make sure this volume is detached from all instances before deleting + if len(v.Attachments) > 0 { + log.Printf("[DEBUG] detaching volumes") + if computeClient, err := config.computeV2Client(GetRegion(d, config)); err != nil { + return err + } else { + for _, volumeAttachment := range v.Attachments { + log.Printf("[DEBUG] Attachment: %v", volumeAttachment) + if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil { + return err + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching", "detaching"}, + Target: []string{"available"}, + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become available: %s", + d.Id(), err) + } + } + } + + // It's possible that this volume was used as a boot device and is currently + // in a "deleting" state from when the instance was terminated. + // If this is true, just move on. It'll eventually delete. + if v.Status != "deleting" { + if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { + return CheckDeleted(d, err, "volume") + } + } + + // Wait for the volume to delete before moving on. + log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "downloading", "available"}, + Target: []string{"deleted"}, + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +func resourceVolumeMetadataV1(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +// VolumeV1StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack volume. 
+func VolumeV1StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := volumes.Get(client, volumeID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return v, "deleted", nil + } + return nil, "", err + } + + if v.Status == "error" { + return v, v.Status, fmt.Errorf("There was an error creating the volume. " + + "Please check with your cloud admin or check the Block Storage " + + "API logs to see why this error occurred.") + } + + return v, v.Status, nil + } +} + +func resourceVolumeAttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if m["instance_id"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) + } + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go new file mode 100644 index 00000000000..ea035cf6f5f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_blockstorage_volume_v2.go @@ -0,0 +1,351 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBlockStorageVolumeV2() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeV2Create, + Read: resourceBlockStorageVolumeV2Read, + Update: resourceBlockStorageVolumeV2Update, + Delete: resourceBlockStorageVolumeV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + Computed: true, + }, + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_vol_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "consistency_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_replica": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, 
+ "attachment": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceVolumeV2AttachmentHash, + }, + }, + } +} + +func resourceBlockStorageVolumeV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + createOpts := &volumes.CreateOpts{ + AvailabilityZone: d.Get("availability_zone").(string), + ConsistencyGroupID: d.Get("consistency_group_id").(string), + Description: d.Get("description").(string), + ImageID: d.Get("image_id").(string), + Metadata: resourceContainerMetadataV2(d), + Name: d.Get("name").(string), + Size: d.Get("size").(int), + SnapshotID: d.Get("snapshot_id").(string), + SourceReplica: d.Get("source_replica").(string), + SourceVolID: d.Get("source_vol_id").(string), + VolumeType: d.Get("volume_type").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + v, err := volumes.Create(blockStorageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack volume: %s", err) + } + log.Printf("[INFO] Volume ID: %s", v.ID) + + // Wait for the volume to become available. + log.Printf( + "[DEBUG] Waiting for volume (%s) to become available", + v.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"downloading", "creating"}, + Target: []string{"available"}, + Refresh: VolumeV2StateRefreshFunc(blockStorageClient, v.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become ready: %s", + v.ID, err) + } + + // Store the ID now + d.SetId(v.ID) + + return resourceBlockStorageVolumeV2Read(d, meta) +} + +func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) + + d.Set("size", v.Size) + d.Set("description", v.Description) + d.Set("availability_zone", v.AvailabilityZone) + d.Set("name", v.Name) + d.Set("snapshot_id", v.SnapshotID) + d.Set("source_vol_id", v.SourceVolID) + d.Set("volume_type", v.VolumeType) + d.Set("metadata", v.Metadata) + d.Set("region", GetRegion(d, config)) + + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = attachment.ID + attachments[i]["instance_id"] = attachment.ServerID + attachments[i]["device"] = attachment.Device + log.Printf("[DEBUG] attachment: %v", attachment) + } + d.Set("attachment", attachments) + + return nil +} + +func resourceBlockStorageVolumeV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + 
blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + updateOpts := volumes.UpdateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceVolumeMetadataV2(d) + } + + _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack volume: %s", err) + } + + return resourceBlockStorageVolumeV2Read(d, meta) +} + +func resourceBlockStorageVolumeV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + // make sure this volume is detached from all instances before deleting + if len(v.Attachments) > 0 { + log.Printf("[DEBUG] detaching volumes") + if computeClient, err := config.computeV2Client(GetRegion(d, config)); err != nil { + return err + } else { + for _, volumeAttachment := range v.Attachments { + log.Printf("[DEBUG] Attachment: %v", volumeAttachment) + if err := volumeattach.Delete(computeClient, volumeAttachment.ServerID, volumeAttachment.ID).ExtractErr(); err != nil { + return err + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching", "detaching"}, + Target: []string{"available"}, + Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become available: %s", + d.Id(), err) + } + } + } + + // It's possible that this volume was used as a boot device and is currently + // in a "deleting" state from when the instance was terminated. + // If this is true, just move on. It'll eventually delete. + if v.Status != "deleting" { + if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { + return CheckDeleted(d, err, "volume") + } + } + + // Wait for the volume to delete before moving on. + log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "downloading", "available"}, + Target: []string{"deleted"}, + Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +func resourceVolumeMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack volume. 
+func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := volumes.Get(client, volumeID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return v, "deleted", nil + } + return nil, "", err + } + + if v.Status == "error" { + return v, v.Status, fmt.Errorf("There was an error creating the volume. " + + "Please check with your cloud admin or check the Block Storage " + + "API logs to see why this error occurred.") + } + + return v, v.Status, nil + } +} + +func resourceVolumeV2AttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if m["instance_id"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) + } + return hashcode.String(buf.String()) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go new file mode 100644 index 00000000000..ce48b8f6e45 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_flavor_v2.go @@ -0,0 +1,143 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeFlavorV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFlavorV2Create, + Read: resourceComputeFlavorV2Read, + Delete: resourceComputeFlavorV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ram": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "vcpus": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "disk": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "swap": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "rx_tx_factor": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Default: 1, + }, + "is_public": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "ephemeral": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeFlavorV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + disk := d.Get("disk").(int) + swap := d.Get("swap").(int) + isPublic := d.Get("is_public").(bool) + ephemeral := d.Get("ephemeral").(int) + createOpts := flavors.CreateOpts{ + Name: d.Get("name").(string), + RAM: d.Get("ram").(int), + VCPUs: d.Get("vcpus").(int), + Disk: &disk, + Swap: &swap, + RxTxFactor: d.Get("rx_tx_factor").(float64), + IsPublic: &isPublic, + Ephemeral: &ephemeral, + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + fl, err := flavors.Create(computeClient, &createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating 
OpenStack flavor: %s", err) + } + + d.SetId(fl.ID) + + return resourceComputeFlavorV2Read(d, meta) +} + +func resourceComputeFlavorV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + fl, err := flavors.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "flavor") + } + + d.Set("name", fl.Name) + d.Set("ram", fl.RAM) + d.Set("vcpus", fl.VCPUs) + d.Set("disk", fl.Disk) + d.Set("swap", fl.Swap) + d.Set("rx_tx_factor", fl.RxTxFactor) + d.Set("is_public", fl.IsPublic) + // d.Set("ephemeral", fl.Ephemeral) TODO: Implement this in gophercloud + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceComputeFlavorV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + err = flavors.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack flavor: %s", err) + } + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go new file mode 100644 index 00000000000..4f0d70345c9 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_associate_v2.go @@ -0,0 +1,235 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + nfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeFloatingIPAssociateV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFloatingIPAssociateV2Create, + Read: resourceComputeFloatingIPAssociateV2Read, + Delete: resourceComputeFloatingIPAssociateV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeFloatingIPAssociateV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + floatingIP := d.Get("floating_ip").(string) + fixedIP := d.Get("fixed_ip").(string) + instanceId := d.Get("instance_id").(string) + + associateOpts := floatingips.AssociateOpts{ + FloatingIP: floatingIP, + FixedIP: fixedIP, + } + log.Printf("[DEBUG] Associate 
Options: %#v", associateOpts) + + err = floatingips.AssociateInstance(computeClient, instanceId, associateOpts).ExtractErr() + if err != nil { + return fmt.Errorf("Error associating Floating IP: %s", err) + } + + // There's an API call to get this information, but it has been + // deprecated. The Neutron API could be used, but I'm trying not + // to mix service APIs. Therefore, a faux ID will be used. + id := fmt.Sprintf("%s/%s/%s", floatingIP, instanceId, fixedIP) + d.SetId(id) + + // This API call is synchronous, so Create won't return until the IP + // is attached. No need to wait for a state. + + return resourceComputeFloatingIPAssociateV2Read(d, meta) +} + +func resourceComputeFloatingIPAssociateV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + // Obtain relevant info from parsing the ID + floatingIP, instanceId, fixedIP, err := parseComputeFloatingIPAssociateId(d.Id()) + if err != nil { + return err + } + + // Now check and see whether the floating IP still exists. + // First try to do this by querying the Network API. + networkEnabled := true + networkClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + networkEnabled = false + } + + var exists bool + if networkEnabled { + log.Printf("[DEBUG] Checking for Floating IP existence via Network API") + exists, err = resourceComputeFloatingIPAssociateV2NetworkExists(networkClient, floatingIP) + } else { + log.Printf("[DEBUG] Checking for Floating IP existence via Compute API") + exists, err = resourceComputeFloatingIPAssociateV2ComputeExists(computeClient, floatingIP) + } + + if err != nil { + return err + } + + if !exists { + d.SetId("") + } + + // Next, see if the instance still exists + instance, err := servers.Get(computeClient, instanceId).Extract() + if err != nil { + if CheckDeleted(d, err, "instance") == nil { + return nil + } + } + + // Finally, check and see if the floating ip is still associated with the instance. 
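+	// Editorial sketch (an assumption based on the accesses below, not an
+	// authoritative schema): instance.Addresses is keyed by network name, and
+	// each value is a list of address maps, roughly:
+	//
+	//	"private": []interface{}{
+	//		map[string]interface{}{"OS-EXT-IPS:type": "fixed", "addr": "10.0.0.5"},
+	//		map[string]interface{}{"OS-EXT-IPS:type": "floating", "addr": "203.0.113.10"},
+	//	}
+	//
+	// which is why each element is type-asserted to a map and checked for the
+	// "OS-EXT-IPS:type" and "addr" keys.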
+	var associated bool
+	for _, networkAddresses := range instance.Addresses {
+		for _, element := range networkAddresses.([]interface{}) {
+			address := element.(map[string]interface{})
+			if address["OS-EXT-IPS:type"] == "floating" && address["addr"] == floatingIP {
+				associated = true
+			}
+		}
+	}
+
+	if !associated {
+		d.SetId("")
+	}
+
+	// Set the attributes pulled from the composed resource ID
+	d.Set("floating_ip", floatingIP)
+	d.Set("instance_id", instanceId)
+	d.Set("fixed_ip", fixedIP)
+	d.Set("region", GetRegion(d, config))
+
+	return nil
+}
+
+func resourceComputeFloatingIPAssociateV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	computeClient, err := config.computeV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack compute client: %s", err)
+	}
+
+	floatingIP := d.Get("floating_ip").(string)
+	instanceId := d.Get("instance_id").(string)
+
+	disassociateOpts := floatingips.DisassociateOpts{
+		FloatingIP: floatingIP,
+	}
+	log.Printf("[DEBUG] Disassociate Options: %#v", disassociateOpts)
+
+	err = floatingips.DisassociateInstance(computeClient, instanceId, disassociateOpts).ExtractErr()
+	if err != nil {
+		return CheckDeleted(d, err, "floating ip association")
+	}
+
+	return nil
+}
+
+func parseComputeFloatingIPAssociateId(id string) (string, string, string, error) {
+	idParts := strings.Split(id, "/")
+	if len(idParts) < 3 {
+		return "", "", "", fmt.Errorf("Unable to determine floating ip association ID")
+	}
+
+	floatingIP := idParts[0]
+	instanceId := idParts[1]
+	fixedIP := idParts[2]
+
+	return floatingIP, instanceId, fixedIP, nil
+}
+
+func resourceComputeFloatingIPAssociateV2NetworkExists(networkClient *gophercloud.ServiceClient, floatingIP string) (bool, error) {
+	listOpts := nfloatingips.ListOpts{
+		FloatingIP: floatingIP,
+	}
+	allPages, err := nfloatingips.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		return false, err
+	}
+
+	allFips, err := nfloatingips.ExtractFloatingIPs(allPages)
+	if err != nil {
+		return false, err
+	}
+
+	if len(allFips) > 1 {
+		return false, fmt.Errorf("There was a problem retrieving the floating IP")
+	}
+
+	if len(allFips) == 0 {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+func resourceComputeFloatingIPAssociateV2ComputeExists(computeClient *gophercloud.ServiceClient, floatingIP string) (bool, error) {
+	// If the Network API isn't available, fall back to the deprecated Compute API.
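+	// Editorial note: the Compute (Nova) os-floating-ips API used here is
+	// deprecated in favor of the Networking (Neutron) API, but some clouds
+	// still run without Neutron, so this fallback lists all floating IPs via
+	// Nova and scans for a matching address. AllPages/Extract is the standard
+	// gophercloud idiom for collecting paginated list results.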
+ allPages, err := floatingips.List(computeClient).AllPages() + if err != nil { + return false, err + } + + allFips, err := floatingips.ExtractFloatingIPs(allPages) + if err != nil { + return false, err + } + + for _, f := range allFips { + if f.IP == floatingIP { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go new file mode 100644 index 00000000000..9f85850bc84 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_floatingip_v2.go @@ -0,0 +1,111 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeFloatingIPV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFloatingIPV2Create, + Read: resourceComputeFloatingIPV2Read, + Update: nil, + Delete: resourceComputeFloatingIPV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "pool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := &floatingips.CreateOpts{ + Pool: d.Get("pool").(string), + } + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newFip, err := floatingips.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating Floating IP: %s", err) + } + + d.SetId(newFip.ID) + + return resourceComputeFloatingIPV2Read(d, meta) +} + +func resourceComputeFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + fip, err := floatingips.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "floating ip") + } + + log.Printf("[DEBUG] Retrieved Floating IP %s: %+v", d.Id(), fip) + + d.Set("pool", fip.Pool) + d.Set("instance_id", fip.InstanceID) + d.Set("address", fip.IP) + d.Set("fixed_ip", fip.FixedIP) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceComputeFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + log.Printf("[DEBUG] Deleting Floating IP %s", d.Id()) + if err := floatingips.Delete(computeClient, 
d.Id()).ExtractErr(); err != nil { + return fmt.Errorf("Error deleting Floating IP: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go new file mode 100644 index 00000000000..adcf0b96a43 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_instance_v2.go @@ -0,0 +1,1082 @@ +package openstack + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "os" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceV2Create, + Read: resourceComputeInstanceV2Read, + Update: resourceComputeInstanceV2Update, + Delete: resourceComputeInstanceV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "image_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "flavor_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_ID", nil), + }, + "flavor_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + DefaultFunc: schema.EnvDefaultFunc("OS_FLAVOR_NAME", nil), + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Removed: "Use the openstack_compute_floatingip_associate_v2 resource instead", + }, + "user_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + // just stash the hash for state & diff comparisons + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + hash := sha1.Sum([]byte(v.(string))) + return hex.EncodeToString(hash[:]) + default: + return "" + } + }, + }, + "security_groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Computed: true, + Elem: &schema.Schema{Type: 
schema.TypeString}, + Set: schema.HashString, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "network": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uuid": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "port": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "fixed_ip_v4": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "fixed_ip_v6": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Removed: "Use the openstack_compute_floatingip_associate_v2 resource instead", + }, + "mac": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "access_network": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + "config_drive": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "admin_pass": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "access_ip_v4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + }, + "access_ip_v6": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + }, + "key_pair": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "block_device": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "uuid": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "destination_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "boot_index": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "delete_on_termination": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "guest_format": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "volume": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Removed: "Use block_device or openstack_compute_volume_attach_v2 instead", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "scheduler_hints": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + 
"different_host": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "same_host": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "query": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "target_cell": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "build_near_host_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + Set: resourceComputeSchedulerHintsHash, + }, + "personality": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "file": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "content": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceComputeInstancePersonalityHash, + }, + "stop_before_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "force_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "all_metadata": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + }, + } +} + +func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + var createOpts servers.CreateOptsBuilder + + // Determines the Image ID using the following rules: + // If a bootable block_device was specified, ignore the image altogether. + // If an image_id was specified, use it. + // If an image_name was specified, look up the image ID, report if error. + imageId, err := getImageIDFromConfig(computeClient, d) + if err != nil { + return err + } + + flavorId, err := getFlavorID(computeClient, d) + if err != nil { + return err + } + + // determine if block_device configuration is correct + // this includes valid combinations and required attributes + if err := checkBlockDeviceConfig(d); err != nil { + return err + } + + // Build a list of networks with the information given upon creation. + // Error out if an invalid network configuration was used. + allInstanceNetworks, err := getAllInstanceNetworks(d, meta) + if err != nil { + return err + } + + // Build a []servers.Network to pass into the create options. 
+ networks := expandInstanceNetworks(allInstanceNetworks) + + configDrive := d.Get("config_drive").(bool) + + createOpts = &servers.CreateOpts{ + Name: d.Get("name").(string), + ImageRef: imageId, + FlavorRef: flavorId, + SecurityGroups: resourceInstanceSecGroupsV2(d), + AvailabilityZone: d.Get("availability_zone").(string), + Networks: networks, + Metadata: resourceInstanceMetadataV2(d), + ConfigDrive: &configDrive, + AdminPass: d.Get("admin_pass").(string), + UserData: []byte(d.Get("user_data").(string)), + Personality: resourceInstancePersonalityV2(d), + } + + if keyName, ok := d.Get("key_pair").(string); ok && keyName != "" { + createOpts = &keypairs.CreateOptsExt{ + CreateOptsBuilder: createOpts, + KeyName: keyName, + } + } + + if vL, ok := d.GetOk("block_device"); ok { + blockDevices, err := resourceInstanceBlockDevicesV2(d, vL.([]interface{})) + if err != nil { + return err + } + + createOpts = &bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: createOpts, + BlockDevice: blockDevices, + } + } + + schedulerHintsRaw := d.Get("scheduler_hints").(*schema.Set).List() + if len(schedulerHintsRaw) > 0 { + log.Printf("[DEBUG] schedulerhints: %+v", schedulerHintsRaw) + schedulerHints := resourceInstanceSchedulerHintsV2(d, schedulerHintsRaw[0].(map[string]interface{})) + createOpts = &schedulerhints.CreateOptsExt{ + CreateOptsBuilder: createOpts, + SchedulerHints: schedulerHints, + } + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + // If a block_device is used, use the bootfromvolume.Create function as it allows an empty ImageRef. + // Otherwise, use the normal servers.Create function. + var server *servers.Server + if _, ok := d.GetOk("block_device"); ok { + server, err = bootfromvolume.Create(computeClient, createOpts).Extract() + } else { + server, err = servers.Create(computeClient, createOpts).Extract() + } + + if err != nil { + return fmt.Errorf("Error creating OpenStack server: %s", err) + } + log.Printf("[INFO] Instance ID: %s", server.ID) + + // Store the ID now + d.SetId(server.ID) + + // Wait for the instance to become running so we can get some attributes + // that aren't available until later. 
+ log.Printf( + "[DEBUG] Waiting for instance (%s) to become running", + server.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"BUILD"}, + Target: []string{"ACTIVE"}, + Refresh: ServerV2StateRefreshFunc(computeClient, server.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to become ready: %s", + server.ID, err) + } + + return resourceComputeInstanceV2Read(d, meta) +} + +func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + server, err := servers.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "server") + } + + log.Printf("[DEBUG] Retrieved Server %s: %+v", d.Id(), server) + + d.Set("name", server.Name) + + // Get the instance network and address information + networks, err := flattenInstanceNetworks(d, meta) + if err != nil { + return err + } + + // Determine the best IPv4 and IPv6 addresses to access the instance with + hostv4, hostv6 := getInstanceAccessAddresses(d, networks) + + // AccessIPv4/v6 isn't standard in OpenStack, but there have been reports + // of them being used in some environments. + if server.AccessIPv4 != "" && hostv4 == "" { + hostv4 = server.AccessIPv4 + } + + if server.AccessIPv6 != "" && hostv6 == "" { + hostv6 = server.AccessIPv6 + } + + d.Set("network", networks) + d.Set("access_ip_v4", hostv4) + d.Set("access_ip_v6", hostv6) + + // Determine the best IP address to use for SSH connectivity. + // Prefer IPv4 over IPv6. + var preferredSSHAddress string + if hostv4 != "" { + preferredSSHAddress = hostv4 + } else if hostv6 != "" { + preferredSSHAddress = hostv6 + } + + if preferredSSHAddress != "" { + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": preferredSSHAddress, + }) + } + + d.Set("all_metadata", server.Metadata) + + secGrpNames := []string{} + for _, sg := range server.SecurityGroups { + secGrpNames = append(secGrpNames, sg["name"].(string)) + } + d.Set("security_groups", secGrpNames) + + flavorId, ok := server.Flavor["id"].(string) + if !ok { + return fmt.Errorf("Error setting OpenStack server's flavor: %v", server.Flavor) + } + d.Set("flavor_id", flavorId) + + flavor, err := flavors.Get(computeClient, flavorId).Extract() + if err != nil { + return err + } + d.Set("flavor_name", flavor.Name) + + // Set the instance's image information appropriately + if err := setImageInformation(computeClient, server, d); err != nil { + return err + } + + // Build a custom struct for the availability zone extension + var serverWithAZ struct { + servers.Server + availabilityzones.ServerExt + } + + // Do another Get so the above work is not disturbed. 
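+	// Editorial note: ExtractInto (used below) decodes the raw server body
+	// into the combined struct declared above, so the standard fields
+	// populate the embedded servers.Server while the OS-EXT-AZ extension
+	// attribute lands in availabilityzones.ServerExt. This is the usual
+	// gophercloud pattern for reading extension attributes.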
+ err = servers.Get(computeClient, d.Id()).ExtractInto(&serverWithAZ) + if err != nil { + return CheckDeleted(d, err, "server") + } + + // Set the availability zone + d.Set("availability_zone", serverWithAZ.AvailabilityZone) + + // Set the region + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + var updateOpts servers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + + if updateOpts != (servers.UpdateOpts{}) { + _, err := servers.Update(computeClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack server: %s", err) + } + } + + if d.HasChange("metadata") { + oldMetadata, newMetadata := d.GetChange("metadata") + var metadataToDelete []string + + // Determine if any metadata keys were removed from the configuration. + // Then request those keys to be deleted. + for oldKey, _ := range oldMetadata.(map[string]interface{}) { + var found bool + for newKey, _ := range newMetadata.(map[string]interface{}) { + if oldKey == newKey { + found = true + } + } + + if !found { + metadataToDelete = append(metadataToDelete, oldKey) + } + } + + for _, key := range metadataToDelete { + err := servers.DeleteMetadatum(computeClient, d.Id(), key).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting metadata (%s) from server (%s): %s", key, d.Id(), err) + } + } + + // Update existing metadata and add any new metadata. + metadataOpts := make(servers.MetadataOpts) + for k, v := range newMetadata.(map[string]interface{}) { + metadataOpts[k] = v.(string) + } + + _, err := servers.UpdateMetadata(computeClient, d.Id(), metadataOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack server (%s) metadata: %s", d.Id(), err) + } + } + + if d.HasChange("security_groups") { + oldSGRaw, newSGRaw := d.GetChange("security_groups") + oldSGSet := oldSGRaw.(*schema.Set) + newSGSet := newSGRaw.(*schema.Set) + secgroupsToAdd := newSGSet.Difference(oldSGSet) + secgroupsToRemove := oldSGSet.Difference(newSGSet) + + log.Printf("[DEBUG] Security groups to add: %v", secgroupsToAdd) + + log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) + + for _, g := range secgroupsToRemove.List() { + err := secgroups.RemoveServer(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil && err.Error() != "EOF" { + if _, ok := err.(gophercloud.ErrDefault404); ok { + continue + } + + return fmt.Errorf("Error removing security group (%s) from OpenStack server (%s): %s", g, d.Id(), err) + } else { + log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g, d.Id()) + } + } + + for _, g := range secgroupsToAdd.List() { + err := secgroups.AddServer(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil && err.Error() != "EOF" { + return fmt.Errorf("Error adding security group (%s) to OpenStack server (%s): %s", g, d.Id(), err) + } + log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g, d.Id()) + } + } + + if d.HasChange("admin_pass") { + if newPwd, ok := d.Get("admin_pass").(string); ok { + err := servers.ChangeAdminPassword(computeClient, d.Id(), newPwd).ExtractErr() + if err != nil { + return fmt.Errorf("Error changing admin password of OpenStack server (%s): %s", d.Id(), err) + } + } + } + 
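+	// Editorial sketch of the resize flow implemented below (the state names
+	// are standard Nova server statuses, as used in this file):
+	//
+	//	ACTIVE --Resize--> RESIZE --(poll)--> VERIFY_RESIZE
+	//	       --ConfirmResize--> ACTIVE
+	//
+	// The explicit ConfirmResize is required; without it the server would sit
+	// in VERIFY_RESIZE until an operator (or a cloud-side auto-confirm
+	// setting, where enabled) resolves it.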
+ if d.HasChange("flavor_id") || d.HasChange("flavor_name") { + var newFlavorId string + var err error + if d.HasChange("flavor_id") { + newFlavorId = d.Get("flavor_id").(string) + } else { + newFlavorName := d.Get("flavor_name").(string) + newFlavorId, err = flavors.IDFromName(computeClient, newFlavorName) + if err != nil { + return err + } + } + + resizeOpts := &servers.ResizeOpts{ + FlavorRef: newFlavorId, + } + log.Printf("[DEBUG] Resize configuration: %#v", resizeOpts) + err = servers.Resize(computeClient, d.Id(), resizeOpts).ExtractErr() + if err != nil { + return fmt.Errorf("Error resizing OpenStack server: %s", err) + } + + // Wait for the instance to finish resizing. + log.Printf("[DEBUG] Waiting for instance (%s) to finish resizing", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"RESIZE"}, + Target: []string{"VERIFY_RESIZE"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to resize: %s", d.Id(), err) + } + + // Confirm resize. + log.Printf("[DEBUG] Confirming resize") + err = servers.ConfirmResize(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error confirming resize of OpenStack server: %s", err) + } + + stateConf = &resource.StateChangeConf{ + Pending: []string{"VERIFY_RESIZE"}, + Target: []string{"ACTIVE"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to confirm resize: %s", d.Id(), err) + } + } + + return resourceComputeInstanceV2Read(d, meta) +} + +func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + if d.Get("stop_before_destroy").(bool) { + err = startstop.Stop(computeClient, d.Id()).ExtractErr() + if err != nil { + log.Printf("[WARN] Error stopping OpenStack instance: %s", err) + } else { + stopStateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"SHUTOFF"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: 3 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + log.Printf("[DEBUG] Waiting for instance (%s) to stop", d.Id()) + _, err = stopStateConf.WaitForState() + if err != nil { + log.Printf("[WARN] Error waiting for instance (%s) to stop: %s, proceeding to delete", d.Id(), err) + } + } + } + + if d.Get("force_delete").(bool) { + log.Printf("[DEBUG] Force deleting OpenStack Instance %s", d.Id()) + err = servers.ForceDelete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack server: %s", err) + } + } else { + log.Printf("[DEBUG] Deleting OpenStack Instance %s", d.Id()) + err = servers.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack server: %s", err) + } + } + + // Wait for the instance to delete before moving on. 
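+	// Editorial note: resource.StateChangeConf (used throughout this file)
+	// polls the Refresh function until it reports one of the Target states,
+	// treating any status outside Pending/Target as an unexpected-state
+	// error. Delay is the initial wait before the first poll, MinTimeout the
+	// minimum interval between polls, and WaitForState returns an error if
+	// Timeout elapses first.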
+ log.Printf("[DEBUG] Waiting for instance (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "SHUTOFF"}, + Target: []string{"DELETED", "SOFT_DELETED"}, + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +// ServerV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack instance. +func ServerV2StateRefreshFunc(client *gophercloud.ServiceClient, instanceID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + s, err := servers.Get(client, instanceID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return s, "DELETED", nil + } + return nil, "", err + } + + return s, s.Status, nil + } +} + +func resourceInstanceSecGroupsV2(d *schema.ResourceData) []string { + rawSecGroups := d.Get("security_groups").(*schema.Set).List() + secgroups := make([]string, len(rawSecGroups)) + for i, raw := range rawSecGroups { + secgroups[i] = raw.(string) + } + return secgroups +} + +func resourceInstanceMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +func resourceInstanceBlockDevicesV2(d *schema.ResourceData, bds []interface{}) ([]bootfromvolume.BlockDevice, error) { + blockDeviceOpts := make([]bootfromvolume.BlockDevice, len(bds)) + for i, bd := range bds { + bdM := bd.(map[string]interface{}) + blockDeviceOpts[i] = bootfromvolume.BlockDevice{ + UUID: bdM["uuid"].(string), + VolumeSize: bdM["volume_size"].(int), + BootIndex: bdM["boot_index"].(int), + DeleteOnTermination: bdM["delete_on_termination"].(bool), + GuestFormat: bdM["guest_format"].(string), + } + + sourceType := bdM["source_type"].(string) + switch sourceType { + case "blank": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceBlank + case "image": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceImage + case "snapshot": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceSnapshot + case "volume": + blockDeviceOpts[i].SourceType = bootfromvolume.SourceVolume + default: + return blockDeviceOpts, fmt.Errorf("unknown block device source type %s", sourceType) + } + + destinationType := bdM["destination_type"].(string) + switch destinationType { + case "local": + blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationLocal + case "volume": + blockDeviceOpts[i].DestinationType = bootfromvolume.DestinationVolume + default: + return blockDeviceOpts, fmt.Errorf("unknown block device destination type %s", destinationType) + } + } + + log.Printf("[DEBUG] Block Device Options: %+v", blockDeviceOpts) + return blockDeviceOpts, nil +} + +func resourceInstanceSchedulerHintsV2(d *schema.ResourceData, schedulerHintsRaw map[string]interface{}) schedulerhints.SchedulerHints { + differentHost := []string{} + if len(schedulerHintsRaw["different_host"].([]interface{})) > 0 { + for _, dh := range schedulerHintsRaw["different_host"].([]interface{}) { + differentHost = append(differentHost, dh.(string)) + } + } + + sameHost := []string{} + if len(schedulerHintsRaw["same_host"].([]interface{})) > 0 { + for _, sh := range schedulerHintsRaw["same_host"].([]interface{}) { 
+			sameHost = append(sameHost, sh.(string))
+		}
+	}
+
+	query := make([]interface{}, 0, len(schedulerHintsRaw["query"].([]interface{})))
+	if len(schedulerHintsRaw["query"].([]interface{})) > 0 {
+		for _, q := range schedulerHintsRaw["query"].([]interface{}) {
+			query = append(query, q.(string))
+		}
+	}
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		Group:           schedulerHintsRaw["group"].(string),
+		DifferentHost:   differentHost,
+		SameHost:        sameHost,
+		Query:           query,
+		TargetCell:      schedulerHintsRaw["target_cell"].(string),
+		BuildNearHostIP: schedulerHintsRaw["build_near_host_ip"].(string),
+	}
+
+	return schedulerHints
+}
+
+func getImageIDFromConfig(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) {
+	// If block_device was used, an Image does not need to be specified, unless an image/local
+	// combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether.
+	if vL, ok := d.GetOk("block_device"); ok {
+		needImage := false
+		for _, v := range vL.([]interface{}) {
+			vM := v.(map[string]interface{})
+			if vM["source_type"] == "image" && vM["destination_type"] == "local" {
+				needImage = true
+			}
+		}
+		if !needImage {
+			return "", nil
+		}
+	}
+
+	if imageId := d.Get("image_id").(string); imageId != "" {
+		return imageId, nil
+	} else {
+		// try the OS_IMAGE_ID environment variable
+		if v := os.Getenv("OS_IMAGE_ID"); v != "" {
+			return v, nil
+		}
+	}
+
+	imageName := d.Get("image_name").(string)
+	if imageName == "" {
+		// try the OS_IMAGE_NAME environment variable
+		if v := os.Getenv("OS_IMAGE_NAME"); v != "" {
+			imageName = v
+		}
+	}
+
+	if imageName != "" {
+		imageId, err := images.IDFromName(computeClient, imageName)
+		if err != nil {
+			return "", err
+		}
+		return imageId, nil
+	}
+
+	return "", fmt.Errorf("Neither a boot device, image ID, nor image name could be determined.")
+}
+
+func setImageInformation(computeClient *gophercloud.ServiceClient, server *servers.Server, d *schema.ResourceData) error {
+	// If block_device was used, an Image does not need to be specified, unless an image/local
+	// combination was used. This emulates normal boot behavior. Otherwise, ignore the image altogether.
+	if vL, ok := d.GetOk("block_device"); ok {
+		needImage := false
+		for _, v := range vL.([]interface{}) {
+			vM := v.(map[string]interface{})
+			if vM["source_type"] == "image" && vM["destination_type"] == "local" {
+				needImage = true
+			}
+		}
+		if !needImage {
+			d.Set("image_id", "Attempt to boot from volume - no image supplied")
+			return nil
+		}
+	}
+
+	imageId := server.Image["id"].(string)
+	if imageId != "" {
+		d.Set("image_id", imageId)
+		if image, err := images.Get(computeClient, imageId).Extract(); err != nil {
+			if _, ok := err.(gophercloud.ErrDefault404); ok {
+				// If the image name can't be found, set the value to "Image not found".
+				// The most likely scenario is that the image no longer exists in the Image Service
+				// but the instance still has a record from when it existed.
+ d.Set("image_name", "Image not found") + return nil + } + return err + } else { + d.Set("image_name", image.Name) + } + } + + return nil +} + +func getFlavorID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { + flavorId := d.Get("flavor_id").(string) + + if flavorId != "" { + return flavorId, nil + } + + flavorName := d.Get("flavor_name").(string) + return flavors.IDFromName(client, flavorName) +} + +func resourceComputeSchedulerHintsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if m["group"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + } + + if m["target_cell"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["target_cell"].(string))) + } + + if m["build_host_near_ip"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["build_host_near_ip"].(string))) + } + + buf.WriteString(fmt.Sprintf("%s-", m["different_host"].([]interface{}))) + buf.WriteString(fmt.Sprintf("%s-", m["same_host"].([]interface{}))) + buf.WriteString(fmt.Sprintf("%s-", m["query"].([]interface{}))) + + return hashcode.String(buf.String()) +} + +func checkBlockDeviceConfig(d *schema.ResourceData) error { + if vL, ok := d.GetOk("block_device"); ok { + for _, v := range vL.([]interface{}) { + vM := v.(map[string]interface{}) + + if vM["source_type"] != "blank" && vM["uuid"] == "" { + return fmt.Errorf("You must specify a uuid for %s block device types", vM["source_type"]) + } + + if vM["source_type"] == "image" && vM["destination_type"] == "volume" { + if vM["volume_size"] == 0 { + return fmt.Errorf("You must specify a volume_size when creating a volume from an image") + } + } + + if vM["source_type"] == "blank" && vM["destination_type"] == "local" { + if vM["volume_size"] == 0 { + return fmt.Errorf("You must specify a volume_size when creating a blank block device") + } + } + } + } + + return nil +} + +func resourceComputeInstancePersonalityHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["file"].(string))) + + return hashcode.String(buf.String()) +} + +func resourceInstancePersonalityV2(d *schema.ResourceData) servers.Personality { + var personalities servers.Personality + + if v := d.Get("personality"); v != nil { + personalityList := v.(*schema.Set).List() + if len(personalityList) > 0 { + for _, p := range personalityList { + rawPersonality := p.(map[string]interface{}) + file := servers.File{ + Path: rawPersonality["file"].(string), + Contents: []byte(rawPersonality["content"].(string)), + } + + log.Printf("[DEBUG] OpenStack Compute Instance Personality: %+v", file) + + personalities = append(personalities, &file) + } + } + } + + return personalities +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go new file mode 100644 index 00000000000..5d2da47b355 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_keypair_v2.go @@ -0,0 +1,105 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeKeypairV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeKeypairV2Create, + Read: resourceComputeKeypairV2Read, + Delete: 
resourceComputeKeypairV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "public_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeKeypairV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := KeyPairCreateOpts{ + keypairs.CreateOpts{ + Name: d.Get("name").(string), + PublicKey: d.Get("public_key").(string), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + kp, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack keypair: %s", err) + } + + d.SetId(kp.Name) + + return resourceComputeKeypairV2Read(d, meta) +} + +func resourceComputeKeypairV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + kp, err := keypairs.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "keypair") + } + + d.Set("name", kp.Name) + d.Set("public_key", kp.PublicKey) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceComputeKeypairV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + err = keypairs.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack keypair: %s", err) + } + d.SetId("") + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go new file mode 100644 index 00000000000..e3cb3f6dcbe --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_secgroup_v2.go @@ -0,0 +1,398 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeSecGroupV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSecGroupV2Create, + Read: resourceComputeSecGroupV2Read, + Update: resourceComputeSecGroupV2Update, + Delete: resourceComputeSecGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: 
map[string]*schema.Schema{
+      "region": &schema.Schema{
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        ForceNew: true,
+      },
+
+      "name": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: false,
+      },
+      "description": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+        ForceNew: false,
+      },
+      "rule": &schema.Schema{
+        Type:     schema.TypeSet,
+        Optional: true,
+        Computed: true,
+        Elem: &schema.Resource{
+          Schema: map[string]*schema.Schema{
+            "id": &schema.Schema{
+              Type:     schema.TypeString,
+              Computed: true,
+            },
+            "from_port": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+              ForceNew: false,
+            },
+            "to_port": &schema.Schema{
+              Type:     schema.TypeInt,
+              Required: true,
+              ForceNew: false,
+            },
+            "ip_protocol": &schema.Schema{
+              Type:     schema.TypeString,
+              Required: true,
+              ForceNew: false,
+            },
+            "cidr": &schema.Schema{
+              Type:     schema.TypeString,
+              Optional: true,
+              ForceNew: false,
+              StateFunc: func(v interface{}) string {
+                return strings.ToLower(v.(string))
+              },
+            },
+            "from_group_id": &schema.Schema{
+              Type:     schema.TypeString,
+              Optional: true,
+              ForceNew: false,
+            },
+            "self": &schema.Schema{
+              Type:     schema.TypeBool,
+              Optional: true,
+              Default:  false,
+              ForceNew: false,
+            },
+          },
+        },
+        Set: secgroupRuleV2Hash,
+      },
+    },
+  }
+}
+
+func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error {
+  config := meta.(*Config)
+  computeClient, err := config.computeV2Client(GetRegion(d, config))
+  if err != nil {
+    return fmt.Errorf("Error creating OpenStack compute client: %s", err)
+  }
+
+  // Before creating the security group, make sure all rules are valid.
+  if err := checkSecGroupV2RulesForErrors(d); err != nil {
+    return err
+  }
+
+  // If all rules are valid, proceed with creating the security group.
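+  // The group must exist before its rules can be created, because each rule
+  // references the new group's ID as ParentGroupID (see resourceSecGroupRuleCreateOptsV2 below).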
+  createOpts := secgroups.CreateOpts{
+    Name:        d.Get("name").(string),
+    Description: d.Get("description").(string),
+  }
+
+  log.Printf("[DEBUG] Create Options: %#v", createOpts)
+  sg, err := secgroups.Create(computeClient, createOpts).Extract()
+  if err != nil {
+    return fmt.Errorf("Error creating OpenStack security group: %s", err)
+  }
+
+  d.SetId(sg.ID)
+
+  // Now that the security group has been created, iterate through each rule and create it
+  createRuleOptsList := resourceSecGroupRulesV2(d)
+  for _, createRuleOpts := range createRuleOptsList {
+    _, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()
+    if err != nil {
+      return fmt.Errorf("Error creating OpenStack security group rule: %s", err)
+    }
+  }
+
+  return resourceComputeSecGroupV2Read(d, meta)
+}
+
+func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error {
+  config := meta.(*Config)
+  computeClient, err := config.computeV2Client(GetRegion(d, config))
+  if err != nil {
+    return fmt.Errorf("Error creating OpenStack compute client: %s", err)
+  }
+
+  sg, err := secgroups.Get(computeClient, d.Id()).Extract()
+  if err != nil {
+    return CheckDeleted(d, err, "security group")
+  }
+
+  d.Set("name", sg.Name)
+  d.Set("description", sg.Description)
+
+  rtm, err := rulesToMap(computeClient, d, sg.Rules)
+  if err != nil {
+    return err
+  }
+  log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm)
+  d.Set("rule", rtm)
+
+  d.Set("region", GetRegion(d, config))
+
+  return nil
+}
+
+func resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error {
+  config := meta.(*Config)
+  computeClient, err := config.computeV2Client(GetRegion(d, config))
+  if err != nil {
+    return fmt.Errorf("Error creating OpenStack compute client: %s", err)
+  }
+
+  updateOpts := secgroups.UpdateOpts{
+    Name:        d.Get("name").(string),
+    Description: d.Get("description").(string),
+  }
+
+  log.Printf("[DEBUG] Updating Security Group (%s) with options: %+v", d.Id(), updateOpts)
+
+  _, err = secgroups.Update(computeClient, d.Id(), updateOpts).Extract()
+  if err != nil {
+    return fmt.Errorf("Error updating OpenStack security group (%s): %s", d.Id(), err)
+  }
+
+  if d.HasChange("rule") {
+    oldSGRaw, newSGRaw := d.GetChange("rule")
+    oldSGRSet, newSGRSet := oldSGRaw.(*schema.Set), newSGRaw.(*schema.Set)
+    secgrouprulesToAdd := newSGRSet.Difference(oldSGRSet)
+    secgrouprulesToRemove := oldSGRSet.Difference(newSGRSet)
+
+    log.Printf("[DEBUG] Security group rules to add: %v", secgrouprulesToAdd)
+    log.Printf("[DEBUG] Security group rules to remove: %v", secgrouprulesToRemove)
+
+    for _, rawRule := range secgrouprulesToAdd.List() {
+      createRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule)
+      rule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract()
+      if err != nil {
+        return fmt.Errorf("Error adding rule to OpenStack security group (%s): %s", d.Id(), err)
+      }
+      log.Printf("[DEBUG] Added rule (%s) to OpenStack security group (%s)", rule.ID, d.Id())
+    }
+
+    for _, r := range secgrouprulesToRemove.List() {
+      rule := resourceSecGroupRuleV2(d, r)
+      err := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr()
+      if err != nil {
+        if _, ok := err.(gophercloud.ErrDefault404); ok {
+          continue
+        }
+
+        return fmt.Errorf("Error removing rule (%s) from OpenStack security group (%s): %s", rule.ID, d.Id(), err)
+      }
+      log.Printf("[DEBUG] Removed rule (%s) from OpenStack security group (%s)", rule.ID, d.Id())
+    }
+  }
+
+  return resourceComputeSecGroupV2Read(d, meta)
+}
+
+func
resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: SecGroupV2StateRefreshFunc(computeClient, d), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack security group: %s", err) + } + + d.SetId("") + return nil +} + +func resourceSecGroupRulesV2(d *schema.ResourceData) []secgroups.CreateRuleOpts { + rawRules := d.Get("rule").(*schema.Set).List() + createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules)) + for i, rawRule := range rawRules { + createRuleOptsList[i] = resourceSecGroupRuleCreateOptsV2(d, rawRule) + } + return createRuleOptsList +} + +func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, rawRule interface{}) secgroups.CreateRuleOpts { + rawRuleMap := rawRule.(map[string]interface{}) + groupId := rawRuleMap["from_group_id"].(string) + if rawRuleMap["self"].(bool) { + groupId = d.Id() + } + return secgroups.CreateRuleOpts{ + ParentGroupID: d.Id(), + FromPort: rawRuleMap["from_port"].(int), + ToPort: rawRuleMap["to_port"].(int), + IPProtocol: rawRuleMap["ip_protocol"].(string), + CIDR: rawRuleMap["cidr"].(string), + FromGroupID: groupId, + } +} + +func checkSecGroupV2RulesForErrors(d *schema.ResourceData) error { + rawRules := d.Get("rule").(*schema.Set).List() + for _, rawRule := range rawRules { + rawRuleMap := rawRule.(map[string]interface{}) + + // only one of cidr, from_group_id, or self can be set + cidr := rawRuleMap["cidr"].(string) + groupId := rawRuleMap["from_group_id"].(string) + self := rawRuleMap["self"].(bool) + errorMessage := fmt.Errorf("Only one of cidr, from_group_id, or self can be set.") + + // if cidr is set, from_group_id and self cannot be set + if cidr != "" { + if groupId != "" || self { + return errorMessage + } + } + + // if from_group_id is set, cidr and self cannot be set + if groupId != "" { + if cidr != "" || self { + return errorMessage + } + } + + // if self is set, cidr and from_group_id cannot be set + if self { + if cidr != "" || groupId != "" { + return errorMessage + } + } + } + + return nil +} + +func resourceSecGroupRuleV2(d *schema.ResourceData, rawRule interface{}) secgroups.Rule { + rawRuleMap := rawRule.(map[string]interface{}) + return secgroups.Rule{ + ID: rawRuleMap["id"].(string), + ParentGroupID: d.Id(), + FromPort: rawRuleMap["from_port"].(int), + ToPort: rawRuleMap["to_port"].(int), + IPProtocol: rawRuleMap["ip_protocol"].(string), + IPRange: secgroups.IPRange{CIDR: rawRuleMap["cidr"].(string)}, + } +} + +func rulesToMap(computeClient *gophercloud.ServiceClient, d *schema.ResourceData, sgrs []secgroups.Rule) ([]map[string]interface{}, error) { + sgrMap := make([]map[string]interface{}, len(sgrs)) + for i, sgr := range sgrs { + groupId := "" + self := false + if sgr.Group.Name != "" { + if sgr.Group.Name == d.Get("name").(string) { + self = true + } else { + // Since Nova only returns the secgroup Name (and not the ID) for the group attribute, + // we need to look up all security groups and match the name. + // Nevermind that Nova wants the ID when setting the Group *and* that multiple groups + // with the same name can exist... 
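+        // If several groups share the same name, the last match in the listing below wins;
+        // this lookup is best-effort.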
+ allPages, err := secgroups.List(computeClient).AllPages() + if err != nil { + return nil, err + } + securityGroups, err := secgroups.ExtractSecurityGroups(allPages) + if err != nil { + return nil, err + } + + for _, sg := range securityGroups { + if sg.Name == sgr.Group.Name { + groupId = sg.ID + } + } + } + } + + sgrMap[i] = map[string]interface{}{ + "id": sgr.ID, + "from_port": sgr.FromPort, + "to_port": sgr.ToPort, + "ip_protocol": sgr.IPProtocol, + "cidr": sgr.IPRange.CIDR, + "self": self, + "from_group_id": groupId, + } + } + return sgrMap, nil +} + +func secgroupRuleV2Hash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string))) + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["cidr"].(string)))) + buf.WriteString(fmt.Sprintf("%s-", m["from_group_id"].(string))) + buf.WriteString(fmt.Sprintf("%t-", m["self"].(bool))) + + return hashcode.String(buf.String()) +} + +func SecGroupV2StateRefreshFunc(computeClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete Security Group %s.\n", d.Id()) + + err := secgroups.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return nil, "", err + } + + s, err := secgroups.Get(computeClient, d.Id()).Extract() + if err != nil { + err = CheckDeleted(d, err, "Security Group") + if err != nil { + return s, "", err + } else { + log.Printf("[DEBUG] Successfully deleted Security Group %s", d.Id()) + return s, "DELETED", nil + } + } + + log.Printf("[DEBUG] Security Group %s still active.\n", d.Id()) + return s, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go new file mode 100644 index 00000000000..45e8993ac32 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_servergroup_v2.go @@ -0,0 +1,138 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeServerGroupV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeServerGroupV2Create, + Read: resourceComputeServerGroupV2Read, + Update: nil, + Delete: resourceComputeServerGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "policies": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "members": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeServerGroupV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) 
+ computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := ServerGroupCreateOpts{ + servergroups.CreateOpts{ + Name: d.Get("name").(string), + Policies: resourceServerGroupPoliciesV2(d), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newSG, err := servergroups.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating ServerGroup: %s", err) + } + + d.SetId(newSG.ID) + + return resourceComputeServerGroupV2Read(d, meta) +} + +func resourceComputeServerGroupV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + sg, err := servergroups.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "server group") + } + + log.Printf("[DEBUG] Retrieved ServerGroup %s: %+v", d.Id(), sg) + + // Set the name + d.Set("name", sg.Name) + + // Set the policies + policies := []string{} + for _, p := range sg.Policies { + policies = append(policies, p) + } + d.Set("policies", policies) + + // Set the members + members := []string{} + for _, m := range sg.Members { + members = append(members, m) + } + d.Set("members", members) + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceComputeServerGroupV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + log.Printf("[DEBUG] Deleting ServerGroup %s", d.Id()) + if err := servergroups.Delete(computeClient, d.Id()).ExtractErr(); err != nil { + return fmt.Errorf("Error deleting ServerGroup: %s", err) + } + + return nil +} + +func resourceServerGroupPoliciesV2(d *schema.ResourceData) []string { + rawPolicies := d.Get("policies").([]interface{}) + policies := make([]string, len(rawPolicies)) + for i, raw := range rawPolicies { + policies[i] = raw.(string) + } + return policies +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go new file mode 100644 index 00000000000..fa517414b4c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_compute_volume_attach_v2.go @@ -0,0 +1,222 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeVolumeAttachV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeVolumeAttachV2Create, + Read: resourceComputeVolumeAttachV2Read, + Delete: resourceComputeVolumeAttachV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func resourceComputeVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + instanceId := d.Get("instance_id").(string) + volumeId := d.Get("volume_id").(string) + + var device string + if v, ok := d.GetOk("device"); ok { + device = v.(string) + } + + attachOpts := volumeattach.CreateOpts{ + Device: device, + VolumeID: volumeId, + } + + log.Printf("[DEBUG] Creating volume attachment: %#v", attachOpts) + + attachment, err := volumeattach.Create(computeClient, instanceId, attachOpts).Extract() + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ATTACHING"}, + Target: []string{"ATTACHED"}, + Refresh: resourceComputeVolumeAttachV2AttachFunc(computeClient, instanceId, attachment.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 30 * time.Second, + MinTimeout: 15 * time.Second, + } + + if _, err = stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error attaching OpenStack volume: %s", err) + } + + log.Printf("[DEBUG] Created volume attachment: %#v", attachment) + + // Use the instance ID and attachment ID as the resource ID. + // This is because an attachment cannot be retrieved just by its ID alone. 
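+  // The composite ID has the form "<instance-id>/<attachment-id>";
+  // parseComputeVolumeAttachmentId splits it apart again in Read and Delete.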
+ id := fmt.Sprintf("%s/%s", instanceId, attachment.ID) + + d.SetId(id) + + return resourceComputeVolumeAttachV2Read(d, meta) +} + +func resourceComputeVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) + if err != nil { + return err + } + + attachment, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() + if err != nil { + return CheckDeleted(d, err, "compute_volume_attach") + } + + log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment) + + d.Set("instance_id", attachment.ServerID) + d.Set("volume_id", attachment.VolumeID) + d.Set("device", attachment.Device) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceComputeVolumeAttachV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + instanceId, attachmentId, err := parseComputeVolumeAttachmentId(d.Id()) + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{""}, + Target: []string{"DETACHED"}, + Refresh: resourceComputeVolumeAttachV2DetachFunc(computeClient, instanceId, attachmentId), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 15 * time.Second, + MinTimeout: 15 * time.Second, + } + + if _, err = stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error detaching OpenStack volume: %s", err) + } + + return nil +} + +func resourceComputeVolumeAttachV2AttachFunc( + computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return va, "ATTACHING", nil + } + return va, "", err + } + + return va, "ATTACHED", nil + } +} + +func resourceComputeVolumeAttachV2DetachFunc( + computeClient *gophercloud.ServiceClient, instanceId, attachmentId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to detach OpenStack volume %s from instance %s", + attachmentId, instanceId) + + va, err := volumeattach.Get(computeClient, instanceId, attachmentId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return va, "DETACHED", nil + } + return va, "", err + } + + err = volumeattach.Delete(computeClient, instanceId, attachmentId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return va, "DETACHED", nil + } + + if _, ok := err.(gophercloud.ErrDefault400); ok { + return nil, "", nil + } + + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Volume Attachment (%s) is still active.", attachmentId) + return nil, "", nil + } +} + +func parseComputeVolumeAttachmentId(id string) (string, string, error) { + idParts := strings.Split(id, "/") + if len(idParts) < 2 { + return "", "", fmt.Errorf("Unable to determine volume attachment ID") + } + + instanceId := idParts[0] + attachmentId := idParts[1] + + return instanceId, attachmentId, nil +} diff --git 
a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go new file mode 100644 index 00000000000..1a7173b1d31 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_recordset_v2.go @@ -0,0 +1,276 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDNSRecordSetV2() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSRecordSetV2Create, + Read: resourceDNSRecordSetV2Read, + Update: resourceDNSRecordSetV2Update, + Delete: resourceDNSRecordSetV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "zone_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "records": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: false, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDNSRecordSetV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + recordsraw := d.Get("records").([]interface{}) + records := make([]string, len(recordsraw)) + for i, recordraw := range recordsraw { + records[i] = recordraw.(string) + } + + createOpts := RecordSetCreateOpts{ + recordsets.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Records: records, + TTL: d.Get("ttl").(int), + Type: d.Get("type").(string), + }, + MapValueSpecs(d), + } + + zoneID := d.Get("zone_id").(string) + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS record set: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS record set (%s) to become available", n.ID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSRecordSet(dnsClient, zoneID, n.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + id := fmt.Sprintf("%s/%s", zoneID, n.ID) + 
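+  // As with compute volume attachments, a record set is only addressable through its
+  // zone, so the two IDs are combined into "<zone-id>/<recordset-id>".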
d.SetId(id) + + log.Printf("[DEBUG] Created OpenStack DNS record set %s: %#v", n.ID, n) + return resourceDNSRecordSetV2Read(d, meta) +} + +func resourceDNSRecordSetV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + // Obtain relevant info from parsing the ID + zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) + if err != nil { + return err + } + + n, err := recordsets.Get(dnsClient, zoneID, recordsetID).Extract() + if err != nil { + return CheckDeleted(d, err, "record_set") + } + + log.Printf("[DEBUG] Retrieved record set %s: %#v", recordsetID, n) + + d.Set("name", n.Name) + d.Set("description", n.Description) + d.Set("ttl", n.TTL) + d.Set("type", n.Type) + d.Set("records", n.Records) + d.Set("region", GetRegion(d, config)) + d.Set("zone_id", zoneID) + + return nil +} + +func resourceDNSRecordSetV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + var updateOpts recordsets.UpdateOpts + if d.HasChange("ttl") { + updateOpts.TTL = d.Get("ttl").(int) + } + + if d.HasChange("records") { + recordsraw := d.Get("records").([]interface{}) + records := make([]string, len(recordsraw)) + for i, recordraw := range recordsraw { + records[i] = recordraw.(string) + } + updateOpts.Records = records + } + + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + + // Obtain relevant info from parsing the ID + zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating record set %s with options: %#v", recordsetID, updateOpts) + + _, err = recordsets.Update(dnsClient, zoneID, recordsetID, updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack DNS record set: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS record set (%s) to update", recordsetID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + return resourceDNSRecordSetV2Read(d, meta) +} + +func resourceDNSRecordSetV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + // Obtain relevant info from parsing the ID + zoneID, recordsetID, err := parseDNSV2RecordSetID(d.Id()) + if err != nil { + return err + } + + err = recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack DNS record set: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS record set (%s) to be deleted", recordsetID) + stateConf := &resource.StateChangeConf{ + Target: []string{"DELETED"}, + Pending: []string{"ACTIVE", "PENDING"}, + Refresh: waitForDNSRecordSet(dnsClient, zoneID, recordsetID), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId("") + return nil 
+} + +func waitForDNSRecordSet(dnsClient *gophercloud.ServiceClient, zoneID, recordsetId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + recordset, err := recordsets.Get(dnsClient, zoneID, recordsetId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return recordset, "DELETED", nil + } + + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack DNS record set (%s) current status: %s", recordset.ID, recordset.Status) + return recordset, recordset.Status, nil + } +} + +func parseDNSV2RecordSetID(id string) (string, string, error) { + idParts := strings.Split(id, "/") + if len(idParts) != 2 { + return "", "", fmt.Errorf("Unable to determine DNS record set ID from raw ID: %s", id) + } + + zoneID := idParts[0] + recordsetID := idParts[1] + + return zoneID, recordsetID, nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go new file mode 100644 index 00000000000..a3028e194f6 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_dns_zone_v2.go @@ -0,0 +1,276 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDNSZoneV2() *schema.Resource { + return &schema.Resource{ + Create: resourceDNSZoneV2Create, + Read: resourceDNSZoneV2Read, + Update: resourceDNSZoneV2Update, + Delete: resourceDNSZoneV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: resourceDNSZoneV2ValidType, + }, + "attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "masters": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDNSZoneV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + mastersraw := d.Get("masters").(*schema.Set).List() + masters := make([]string, len(mastersraw)) + for i, masterraw := range mastersraw { + masters[i] = masterraw.(string) + } + + 
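+  // zones.CreateOpts takes the attributes as a plain map[string]string, so the raw
+  // schema map is converted below.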
attrsraw := d.Get("attributes").(map[string]interface{}) + attrs := make(map[string]string, len(attrsraw)) + for k, v := range attrsraw { + attrs[k] = v.(string) + } + + createOpts := ZoneCreateOpts{ + zones.CreateOpts{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Attributes: attrs, + TTL: d.Get("ttl").(int), + Email: d.Get("email").(string), + Description: d.Get("description").(string), + Masters: masters, + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := zones.Create(dnsClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS zone: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS Zone (%s) to become available", n.ID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSZone(dnsClient, n.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(n.ID) + + log.Printf("[DEBUG] Created OpenStack DNS Zone %s: %#v", n.ID, n) + return resourceDNSZoneV2Read(d, meta) +} + +func resourceDNSZoneV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + n, err := zones.Get(dnsClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "zone") + } + + log.Printf("[DEBUG] Retrieved Zone %s: %#v", d.Id(), n) + + d.Set("name", n.Name) + d.Set("email", n.Email) + d.Set("description", n.Description) + d.Set("ttl", n.TTL) + d.Set("type", n.Type) + d.Set("attributes", n.Attributes) + d.Set("masters", n.Masters) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceDNSZoneV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + var updateOpts zones.UpdateOpts + if d.HasChange("email") { + updateOpts.Email = d.Get("email").(string) + } + if d.HasChange("ttl") { + updateOpts.TTL = d.Get("ttl").(int) + } + if d.HasChange("masters") { + mastersraw := d.Get("masters").(*schema.Set).List() + masters := make([]string, len(mastersraw)) + for i, masterraw := range mastersraw { + masters[i] = masterraw.(string) + } + updateOpts.Masters = masters + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + + log.Printf("[DEBUG] Updating Zone %s with options: %#v", d.Id(), updateOpts) + + _, err = zones.Update(dnsClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack DNS Zone: %s", err) + } + + log.Printf("[DEBUG] Waiting for DNS Zone (%s) to update", d.Id()) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Pending: []string{"PENDING"}, + Refresh: waitForDNSZone(dnsClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + return resourceDNSZoneV2Read(d, meta) +} + +func resourceDNSZoneV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + dnsClient, err := config.dnsV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack DNS client: %s", err) + } + + _, err = 
zones.Delete(dnsClient, d.Id()).Extract()
+  if err != nil {
+    return fmt.Errorf("Error deleting OpenStack DNS Zone: %s", err)
+  }
+
+  log.Printf("[DEBUG] Waiting for DNS Zone (%s) to be deleted", d.Id())
+  stateConf := &resource.StateChangeConf{
+    Target:     []string{"DELETED"},
+    Pending:    []string{"ACTIVE", "PENDING"},
+    Refresh:    waitForDNSZone(dnsClient, d.Id()),
+    Timeout:    d.Timeout(schema.TimeoutDelete),
+    Delay:      5 * time.Second,
+    MinTimeout: 3 * time.Second,
+  }
+
+  _, err = stateConf.WaitForState()
+
+  d.SetId("")
+  return nil
+}
+
+func resourceDNSZoneV2ValidType(v interface{}, k string) (ws []string, errors []error) {
+  value := v.(string)
+  validTypes := []string{
+    "PRIMARY",
+    "SECONDARY",
+  }
+
+  for _, v := range validTypes {
+    if value == v {
+      return
+    }
+  }
+
+  err := fmt.Errorf("%s must be one of %s", k, validTypes)
+  errors = append(errors, err)
+  return
+}
+
+func waitForDNSZone(dnsClient *gophercloud.ServiceClient, zoneId string) resource.StateRefreshFunc {
+  return func() (interface{}, string, error) {
+    zone, err := zones.Get(dnsClient, zoneId).Extract()
+    if err != nil {
+      if _, ok := err.(gophercloud.ErrDefault404); ok {
+        return zone, "DELETED", nil
+      }
+
+      return nil, "", err
+    }
+
+    log.Printf("[DEBUG] OpenStack DNS Zone (%s) current status: %s", zone.ID, zone.Status)
+    return zone, zone.Status, nil
+  }
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go
new file mode 100644
index 00000000000..dfb5a65c5e1
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_firewall_v1.go
@@ -0,0 +1,323 @@
+package openstack
+
+import (
+  "fmt"
+  "log"
+  "time"
+
+  "github.com/gophercloud/gophercloud"
+  "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls"
+  "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceFWFirewallV1() *schema.Resource {
+  return &schema.Resource{
+    Create: resourceFWFirewallV1Create,
+    Read:   resourceFWFirewallV1Read,
+    Update: resourceFWFirewallV1Update,
+    Delete: resourceFWFirewallV1Delete,
+    Importer: &schema.ResourceImporter{
+      State: schema.ImportStatePassthrough,
+    },
+
+    Timeouts: &schema.ResourceTimeout{
+      Create: schema.DefaultTimeout(10 * time.Minute),
+      Update: schema.DefaultTimeout(10 * time.Minute),
+      Delete: schema.DefaultTimeout(10 * time.Minute),
+    },
+
+    Schema: map[string]*schema.Schema{
+      "region": &schema.Schema{
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        ForceNew: true,
+      },
+      "name": &schema.Schema{
+        Type:     schema.TypeString,
+        Optional: true,
+      },
+      "description": &schema.Schema{
+        Type:     schema.TypeString,
+        Optional: true,
+      },
+      "policy_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Required: true,
+      },
+      "admin_state_up": &schema.Schema{
+        Type:     schema.TypeBool,
+        Optional: true,
+        Default:  true,
+      },
+      "tenant_id": &schema.Schema{
+        Type:     schema.TypeString,
+        Optional: true,
+        ForceNew: true,
+        Computed: true,
+      },
+      "associated_routers": &schema.Schema{
+        Type:          schema.TypeSet,
+        Optional:      true,
+        Elem:          &schema.Schema{Type: schema.TypeString},
+        Set:           schema.HashString,
+        ConflictsWith: []string{"no_routers"},
+        Computed:      true,
+      },
+      "no_routers":
&schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"associated_routers"}, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWFirewallV1Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var createOpts firewalls.CreateOptsBuilder + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts = FirewallCreateOpts{ + firewalls.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + PolicyID: d.Get("policy_id").(string), + AdminStateUp: &adminStateUp, + TenantID: d.Get("tenant_id").(string), + }, + MapValueSpecs(d), + } + + associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() + if len(associatedRoutersRaw) > 0 { + log.Printf("[DEBUG] Will attempt to associate Firewall with router(s): %+v", associatedRoutersRaw) + + var routerIds []string + for _, v := range associatedRoutersRaw { + routerIds = append(routerIds, v.(string)) + } + + createOpts = &routerinsertion.CreateOptsExt{ + CreateOptsBuilder: createOpts, + RouterIDs: routerIds, + } + } + + if d.Get("no_routers").(bool) { + routerIds := make([]string, 0) + log.Println("[DEBUG] No routers specified. Setting to empty slice") + createOpts = &routerinsertion.CreateOptsExt{ + CreateOptsBuilder: createOpts, + RouterIDs: routerIds, + } + } + + log.Printf("[DEBUG] Create firewall: %#v", createOpts) + + firewall, err := firewalls.Create(networkingClient, createOpts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall created: %#v", firewall) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForFirewallActive(networkingClient, firewall.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + log.Printf("[DEBUG] Firewall (%s) is active.", firewall.ID) + + d.SetId(firewall.ID) + + return resourceFWFirewallV1Read(d, meta) +} + +func resourceFWFirewallV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var firewall Firewall + err = firewalls.Get(networkingClient, d.Id()).ExtractInto(&firewall) + if err != nil { + return CheckDeleted(d, err, "firewall") + } + + log.Printf("[DEBUG] Read OpenStack Firewall %s: %#v", d.Id(), firewall) + + d.Set("name", firewall.Name) + d.Set("description", firewall.Description) + d.Set("policy_id", firewall.PolicyID) + d.Set("admin_state_up", firewall.AdminStateUp) + d.Set("tenant_id", firewall.TenantID) + d.Set("associated_routers", firewall.RouterIDs) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceFWFirewallV1Update(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // PolicyID is required + opts := firewalls.UpdateOpts{ + PolicyID: d.Get("policy_id").(string), + } 
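+  // PolicyID must always be sent on update; the remaining fields are only
+  // included when they have changed.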
+ + if d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + + if d.HasChange("admin_state_up") { + adminStateUp := d.Get("admin_state_up").(bool) + opts.AdminStateUp = &adminStateUp + } + + var updateOpts firewalls.UpdateOptsBuilder + var routerIds []string + if d.HasChange("associated_routers") || d.HasChange("no_routers") { + // 'no_routers' = true means 'associated_routers' will be empty... + if d.Get("no_routers").(bool) { + log.Printf("[DEBUG] 'no_routers' is true.") + routerIds = make([]string, 0) + } else { + associatedRoutersRaw := d.Get("associated_routers").(*schema.Set).List() + for _, v := range associatedRoutersRaw { + routerIds = append(routerIds, v.(string)) + } + } + + updateOpts = routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: opts, + RouterIDs: routerIds, + } + } else { + updateOpts = opts + } + + log.Printf("[DEBUG] Updating firewall with id %s: %#v", d.Id(), updateOpts) + + err = firewalls.Update(networkingClient, d.Id(), updateOpts).Err + if err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForFirewallActive(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + return resourceFWFirewallV1Read(d, meta) +} + +func resourceFWFirewallV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Ensure the firewall was fully created/updated before being deleted. 
+ stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForFirewallActive(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutUpdate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + err = firewalls.Delete(networkingClient, d.Id()).Err + + if err != nil { + return err + } + + stateConf = &resource.StateChangeConf{ + Pending: []string{"DELETING"}, + Target: []string{"DELETED"}, + Refresh: waitForFirewallDeletion(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + return err +} + +func waitForFirewallActive(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + var fw Firewall + + err := firewalls.Get(networkingClient, id).ExtractInto(&fw) + if err != nil { + return nil, "", err + } + return fw, fw.Status, nil + } +} + +func waitForFirewallDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + fw, err := firewalls.Get(networkingClient, id).Extract() + log.Printf("[DEBUG] Got firewall %s => %#v", id, fw) + + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Firewall %s is actually deleted", id) + return "", "DELETED", nil + } + return nil, "", fmt.Errorf("Unexpected error: %s", err) + } + + log.Printf("[DEBUG] Firewall %s deletion is pending", id) + return fw, "DELETING", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go new file mode 100644 index 00000000000..9012854aa13 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_policy_v1.go @@ -0,0 +1,231 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceFWPolicyV1() *schema.Resource { + return &schema.Resource{ + Create: resourceFWPolicyV1Create, + Read: resourceFWPolicyV1Read, + Update: resourceFWPolicyV1Update, + Delete: resourceFWPolicyV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "audited": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "shared": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "rules": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "value_specs": &schema.Schema{ + Type: 
schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWPolicyV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + v := d.Get("rules").([]interface{}) + + log.Printf("[DEBUG] Rules found : %#v", v) + log.Printf("[DEBUG] Rules count : %d", len(v)) + + rules := make([]string, len(v)) + for i, v := range v { + rules[i] = v.(string) + } + + audited := d.Get("audited").(bool) + + opts := PolicyCreateOpts{ + policies.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Audited: &audited, + TenantID: d.Get("tenant_id").(string), + Rules: rules, + }, + MapValueSpecs(d), + } + + if r, ok := d.GetOk("shared"); ok { + shared := r.(bool) + opts.Shared = &shared + } + + log.Printf("[DEBUG] Create firewall policy: %#v", opts) + + policy, err := policies.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall policy created: %#v", policy) + + d.SetId(policy.ID) + + return resourceFWPolicyV1Read(d, meta) +} + +func resourceFWPolicyV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall policy: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + policy, err := policies.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "FW policy") + } + + log.Printf("[DEBUG] Read OpenStack Firewall Policy %s: %#v", d.Id(), policy) + + d.Set("name", policy.Name) + d.Set("description", policy.Description) + d.Set("shared", policy.Shared) + d.Set("audited", policy.Audited) + d.Set("tenant_id", policy.TenantID) + d.Set("rules", policy.Rules) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceFWPolicyV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := policies.UpdateOpts{} + + if d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + + if d.HasChange("rules") { + v := d.Get("rules").([]interface{}) + + log.Printf("[DEBUG] Rules found : %#v", v) + log.Printf("[DEBUG] Rules count : %d", len(v)) + + rules := make([]string, len(v)) + for i, v := range v { + rules[i] = v.(string) + } + opts.Rules = rules + } + + log.Printf("[DEBUG] Updating firewall policy with id %s: %#v", d.Id(), opts) + + err = policies.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return resourceFWPolicyV1Read(d, meta) +} + +func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall policy: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: 
waitForFirewallPolicyDeletion(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 0, + MinTimeout: 2 * time.Second, + } + + if _, err = stateConf.WaitForState(); err != nil { + return err + } + + return nil +} + +func waitForFirewallPolicyDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + err := policies.Delete(networkingClient, id).Err + if err == nil { + return "", "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + // This error usually means that the policy is attached + // to a firewall. At this point, the firewall is probably + // being deleted, so we retry a few times. + return nil, "ACTIVE", nil + } + } + + return nil, "ACTIVE", err + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go new file mode 100644 index 00000000000..92793c46d99 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_fw_rule_v1.go @@ -0,0 +1,258 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceFWRuleV1() *schema.Resource { + return &schema.Resource{ + Create: resourceFWRuleV1Create, + Read: resourceFWRuleV1Read, + Update: resourceFWRuleV1Update, + Delete: resourceFWRuleV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "ip_version": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + }, + "source_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "destination_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "source_port": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "destination_port": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWRuleV1Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + enabled := d.Get("enabled").(bool) + ipVersion := resourceFWRuleV1DetermineIPVersion(d.Get("ip_version").(int)) + protocol := 
resourceFWRuleV1DetermineProtocol(d.Get("protocol").(string)) + + ruleConfiguration := RuleCreateOpts{ + rules.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Protocol: protocol, + Action: d.Get("action").(string), + IPVersion: ipVersion, + SourceIPAddress: d.Get("source_ip_address").(string), + DestinationIPAddress: d.Get("destination_ip_address").(string), + SourcePort: d.Get("source_port").(string), + DestinationPort: d.Get("destination_port").(string), + Enabled: &enabled, + TenantID: d.Get("tenant_id").(string), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create firewall rule: %#v", ruleConfiguration) + + rule, err := rules.Create(networkingClient, ruleConfiguration).Extract() + + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall rule with id %s : %#v", rule.ID, rule) + + d.SetId(rule.ID) + + return resourceFWRuleV1Read(d, meta) +} + +func resourceFWRuleV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + rule, err := rules.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "FW rule") + } + + log.Printf("[DEBUG] Read OpenStack Firewall Rule %s: %#v", d.Id(), rule) + + d.Set("action", rule.Action) + d.Set("name", rule.Name) + d.Set("description", rule.Description) + d.Set("ip_version", rule.IPVersion) + d.Set("source_ip_address", rule.SourceIPAddress) + d.Set("destination_ip_address", rule.DestinationIPAddress) + d.Set("source_port", rule.SourcePort) + d.Set("destination_port", rule.DestinationPort) + d.Set("enabled", rule.Enabled) + + if rule.Protocol == "" { + d.Set("protocol", "any") + } else { + d.Set("protocol", rule.Protocol) + } + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceFWRuleV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + name := d.Get("name").(string) + description := d.Get("description").(string) + protocol := d.Get("protocol").(string) + action := d.Get("action").(string) + ipVersion := resourceFWRuleV1DetermineIPVersion(d.Get("ip_version").(int)) + sourceIPAddress := d.Get("source_ip_address").(string) + sourcePort := d.Get("source_port").(string) + destinationIPAddress := d.Get("destination_ip_address").(string) + destinationPort := d.Get("destination_port").(string) + enabled := d.Get("enabled").(bool) + + opts := rules.UpdateOpts{ + Name: &name, + Description: &description, + Protocol: &protocol, + Action: &action, + IPVersion: &ipVersion, + SourceIPAddress: &sourceIPAddress, + DestinationIPAddress: &destinationIPAddress, + SourcePort: &sourcePort, + DestinationPort: &destinationPort, + Enabled: &enabled, + } + + log.Printf("[DEBUG] Updating firewall rules: %#v", opts) + err = rules.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return resourceFWRuleV1Read(d, meta) +} + +func resourceFWRuleV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + 
if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + rule, err := rules.Get(networkingClient, d.Id()).Extract() + if err != nil { + return err + } + + if rule.PolicyID != "" { + _, err := policies.RemoveRule(networkingClient, rule.PolicyID, rule.ID).Extract() + if err != nil { + return err + } + } + + return rules.Delete(networkingClient, d.Id()).Err +} + +func resourceFWRuleV1DetermineIPVersion(ipv int) gophercloud.IPVersion { + // Determine the IP Version + var ipVersion gophercloud.IPVersion + switch ipv { + case 4: + ipVersion = gophercloud.IPv4 + case 6: + ipVersion = gophercloud.IPv6 + } + + return ipVersion +} + +func resourceFWRuleV1DetermineProtocol(p string) rules.Protocol { + var protocol rules.Protocol + switch p { + case "any": + protocol = rules.ProtocolAny + case "icmp": + protocol = rules.ProtocolICMP + case "tcp": + protocol = rules.ProtocolTCP + case "udp": + protocol = rules.ProtocolUDP + } + + return protocol +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go new file mode 100644 index 00000000000..83dddb50406 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_project_v3.go @@ -0,0 +1,184 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceIdentityProjectV3() *schema.Resource { + return &schema.Resource{ + Create: resourceIdentityProjectV3Create, + Read: resourceIdentityProjectV3Read, + Update: resourceIdentityProjectV3Update, + Delete: resourceIdentityProjectV3Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "domain_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "is_domain": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "parent_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceIdentityProjectV3Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + enabled := d.Get("enabled").(bool) + isDomain := d.Get("is_domain").(bool) + createOpts := projects.CreateOpts{ + Description: d.Get("description").(string), + DomainID: d.Get("domain_id").(string), + Enabled: &enabled, + IsDomain: &isDomain, + Name: d.Get("name").(string), + ParentID: d.Get("parent_id").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + project, err := projects.Create(identityClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack project: %s", err) + } + + d.SetId(project.ID) + + return 
resourceIdentityProjectV3Read(d, meta) +} + +func resourceIdentityProjectV3Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + project, err := projects.Get(identityClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "project") + } + + log.Printf("[DEBUG] Retrieved OpenStack project: %#v", project) + + d.Set("description", project.Description) + d.Set("domain_id", project.DomainID) + d.Set("enabled", project.Enabled) + d.Set("is_domain", project.IsDomain) + d.Set("name", project.Name) + d.Set("parent_id", project.ParentID) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceIdentityProjectV3Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + var hasChange bool + var updateOpts projects.UpdateOpts + + if d.HasChange("domain_id") { + hasChange = true + updateOpts.DomainID = d.Get("domain_id").(string) + } + + if d.HasChange("enabled") { + hasChange = true + enabled := d.Get("enabled").(bool) + updateOpts.Enabled = &enabled + } + + if d.HasChange("is_domain") { + hasChange = true + isDomain := d.Get("is_domain").(bool) + updateOpts.IsDomain = &isDomain + } + + if d.HasChange("name") { + hasChange = true + updateOpts.Name = d.Get("name").(string) + } + + if d.HasChange("parent_id") { + hasChange = true + updateOpts.ParentID = d.Get("parent_id").(string) + } + + if d.HasChange("description") { + hasChange = true + updateOpts.Description = d.Get("description").(string) + } + + if hasChange { + _, err := projects.Update(identityClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack project: %s", err) + } + } + + return resourceIdentityProjectV3Read(d, meta) +} + +func resourceIdentityProjectV3Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + err = projects.Delete(identityClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack project: %s", err) + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go new file mode 100644 index 00000000000..b6c8008dde3 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_identity_user_v3.go @@ -0,0 +1,311 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/users" + "github.com/hashicorp/terraform/helper/schema" +) + +var userOptions = map[users.Option]string{ + users.IgnoreChangePasswordUponFirstUse: "ignore_change_password_upon_first_use", + users.IgnorePasswordExpiry: "ignore_password_expiry", + users.IgnoreLockoutFailureAttempts: "ignore_lockout_failure_attempts", + users.MultiFactorAuthEnabled: "multi_factor_auth_enabled", +} + +func resourceIdentityUserV3() *schema.Resource { + return &schema.Resource{ + Create: 
resourceIdentityUserV3Create, + Read: resourceIdentityUserV3Read, + Update: resourceIdentityUserV3Update, + Delete: resourceIdentityUserV3Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "default_project_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "domain_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "extra": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + // The following are all specific options that must + // be bundled into user.Options + "ignore_change_password_upon_first_use": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "ignore_password_expiry": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "ignore_lockout_failure_attempts": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "multi_factor_auth_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "multi_factor_auth_rule": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule": &schema.Schema{ + Type: schema.TypeList, + MinItems: 1, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } +} + +func resourceIdentityUserV3Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + enabled := d.Get("enabled").(bool) + createOpts := users.CreateOpts{ + DefaultProjectID: d.Get("default_project_id").(string), + Description: d.Get("description").(string), + DomainID: d.Get("domain_id").(string), + Enabled: &enabled, + Extra: d.Get("extra").(map[string]interface{}), + Name: d.Get("name").(string), + } + + // Build the user options + options := map[users.Option]interface{}{} + for optionType, option := range userOptions { + if v, ok := d.GetOk(option); ok { + options[optionType] = v.(bool) + } + } + + // Build the MFA rules + mfaRules := resourceIdentityUserV3BuildMFARules(d.Get("multi_factor_auth_rule").([]interface{})) + if len(mfaRules) > 0 { + options[users.MultiFactorAuthRules] = mfaRules + } + + createOpts.Options = options + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + // Add password here so it wouldn't go in the above log entry + createOpts.Password = d.Get("password").(string) + + user, err := users.Create(identityClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack user: %s", err) + } + + d.SetId(user.ID) + + return resourceIdentityUserV3Read(d, meta) +} + +func resourceIdentityUserV3Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } 
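+ // Fetch the user and flatten its attributes back into state; the boolean + // options and the MFA rules are unpacked from user.Options further below.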
+ + user, err := users.Get(identityClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "user") + } + + log.Printf("[DEBUG] Retrieved OpenStack user: %#v", user) + + d.Set("default_project_id", user.DefaultProjectID) + d.Set("description", user.Description) + d.Set("domain_id", user.DomainID) + d.Set("enabled", user.Enabled) + d.Set("extra", user.Extra) + d.Set("name", user.Name) + d.Set("region", GetRegion(d, config)) + + options := user.Options + for _, option := range userOptions { + if v, ok := options[option]; ok { + d.Set(option, v.(bool)) + } + } + + mfaRules := []map[string]interface{}{} + if v, ok := options["multi_factor_auth_rules"].([]interface{}); ok { + for _, v := range v { + mfaRule := map[string]interface{}{ + "rule": v, + } + mfaRules = append(mfaRules, mfaRule) + } + + d.Set("multi_factor_auth_rule", mfaRules) + } + + return nil +} + +func resourceIdentityUserV3Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + var hasChange bool + var updateOpts users.UpdateOpts + + if d.HasChange("default_project_id") { + hasChange = true + updateOpts.DefaultProjectID = d.Get("default_project_id").(string) + } + + if d.HasChange("description") { + hasChange = true + updateOpts.Description = d.Get("description").(string) + } + + if d.HasChange("domain_id") { + hasChange = true + updateOpts.DomainID = d.Get("domain_id").(string) + } + + if d.HasChange("enabled") { + hasChange = true + enabled := d.Get("enabled").(bool) + updateOpts.Enabled = &enabled + } + + if d.HasChange("extra") { + hasChange = true + updateOpts.Extra = d.Get("extra").(map[string]interface{}) + } + + if d.HasChange("name") { + hasChange = true + updateOpts.Name = d.Get("name").(string) + } + + // Determine if the options have changed + options := map[users.Option]interface{}{} + for optionType, option := range userOptions { + if d.HasChange(option) { + hasChange = true + options[optionType] = d.Get(option).(bool) + } + } + + // Build the MFA rules + if d.HasChange("multi_factor_auth_rule") { + mfaRules := resourceIdentityUserV3BuildMFARules(d.Get("multi_factor_auth_rule").([]interface{})) + if len(mfaRules) > 0 { + options[users.MultiFactorAuthRules] = mfaRules + } + } + + updateOpts.Options = options + + if hasChange { + log.Printf("[DEBUG] Update Options: %#v", updateOpts) + } + + if d.HasChange("password") { + hasChange = true + updateOpts.Password = d.Get("password").(string) + } + + if hasChange { + _, err := users.Update(identityClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack user: %s", err) + } + } + + return resourceIdentityUserV3Read(d, meta) +} + +func resourceIdentityUserV3Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + identityClient, err := config.identityV3Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack identity client: %s", err) + } + + err = users.Delete(identityClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack user: %s", err) + } + + return nil +} + +func resourceIdentityUserV3BuildMFARules(rules []interface{}) []interface{} { + var mfaRules []interface{} + + for _, rule := range rules { + ruleMap := rule.(map[string]interface{}) + ruleList := ruleMap["rule"].([]interface{}) + mfaRules = append(mfaRules, 
ruleList) + } + + return mfaRules +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go new file mode 100644 index 00000000000..3d9dcf8e1c5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_images_image_v2.go @@ -0,0 +1,526 @@ +package openstack + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "io" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceImagesImageV2() *schema.Resource { + return &schema.Resource{ + Create: resourceImagesImageV2Create, + Read: resourceImagesImageV2Read, + Update: resourceImagesImageV2Update, + Delete: resourceImagesImageV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "checksum": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "container_format": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: resourceImagesImageV2ValidateContainerFormat, + }, + + "created_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "disk_format": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: resourceImagesImageV2ValidateDiskFormat, + }, + + "file": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "image_cache_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: fmt.Sprintf("%s/.terraform/image_cache", os.Getenv("HOME")), + }, + + "image_source_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"local_file_path"}, + }, + + "local_file_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"image_source_url"}, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + + "min_disk_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validatePositiveInt, + Default: 0, + }, + + "min_ram_mb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validatePositiveInt, + Default: 0, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "owner": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "protected": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "schema": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "size_bytes": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + 
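+ // schema.HashString hashes each tag string itself, so tag ordering + // never produces a spurious diff.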
Set: schema.HashString, + }, + + "update_at": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "visibility": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + ValidateFunc: resourceImagesImageV2ValidateVisibility, + Default: "private", + }, + + "properties": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceImagesImageV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + imageClient, err := config.imageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack image client: %s", err) + } + + protected := d.Get("protected").(bool) + visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) + + properties := d.Get("properties").(map[string]interface{}) + imageProperties := resourceImagesImageV2ExpandProperties(properties) + + createOpts := &images.CreateOpts{ + Name: d.Get("name").(string), + ContainerFormat: d.Get("container_format").(string), + DiskFormat: d.Get("disk_format").(string), + MinDisk: d.Get("min_disk_gb").(int), + MinRAM: d.Get("min_ram_mb").(int), + Protected: &protected, + Visibility: &visibility, + Properties: imageProperties, + } + + if v, ok := d.GetOk("tags"); ok { + tags := v.(*schema.Set).List() + createOpts.Tags = resourceImagesImageV2BuildTags(tags) + } + + d.Partial(true) + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newImg, err := images.Create(imageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating Image: %s", err) + } + + d.SetId(newImg.ID) + + // downloading/getting image file props + imgFilePath, err := resourceImagesImageV2File(d) + if err != nil { + return fmt.Errorf("Error opening file for Image: %s", err) + + } + fileSize, fileChecksum, err := resourceImagesImageV2FileProps(imgFilePath) + if err != nil { + return fmt.Errorf("Error getting file props: %s", err) + } + + // upload + imgFile, err := os.Open(imgFilePath) + if err != nil { + return fmt.Errorf("Error opening file %q: %s", imgFilePath, err) + } + defer imgFile.Close() + log.Printf("[WARN] Uploading image %s (%d bytes). 
This may take a while.", d.Id(), fileSize) + + res := imagedata.Upload(imageClient, d.Id(), imgFile) + if res.Err != nil { + return fmt.Errorf("Error while uploading file %q: %s", imgFilePath, res.Err) + } + + // wait for active + stateConf := &resource.StateChangeConf{ + Pending: []string{string(images.ImageStatusQueued), string(images.ImageStatusSaving)}, + Target: []string{string(images.ImageStatusActive)}, + Refresh: resourceImagesImageV2RefreshFunc(imageClient, d.Id(), fileSize, fileChecksum), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + if _, err = stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Image: %s", err) + } + + d.Partial(false) + + return resourceImagesImageV2Read(d, meta) +} + +func resourceImagesImageV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + imageClient, err := config.imageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack image client: %s", err) + } + + img, err := images.Get(imageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "image") + } + + log.Printf("[DEBUG] Retrieved Image %s: %#v", d.Id(), img) + + d.Set("owner", img.Owner) + d.Set("status", img.Status) + d.Set("file", img.File) + d.Set("schema", img.Schema) + d.Set("checksum", img.Checksum) + d.Set("size_bytes", img.SizeBytes) + d.Set("metadata", img.Metadata) + d.Set("created_at", img.CreatedAt) + d.Set("update_at", img.UpdatedAt) + d.Set("container_format", img.ContainerFormat) + d.Set("disk_format", img.DiskFormat) + d.Set("min_disk_gb", img.MinDiskGigabytes) + d.Set("min_ram_mb", img.MinRAMMegabytes) + d.Set("file", img.File) + d.Set("name", img.Name) + d.Set("protected", img.Protected) + d.Set("size_bytes", img.SizeBytes) + d.Set("tags", img.Tags) + d.Set("visibility", img.Visibility) + d.Set("properties", img.Properties) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceImagesImageV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + imageClient, err := config.imageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack image client: %s", err) + } + + updateOpts := make(images.UpdateOpts, 0) + + if d.HasChange("visibility") { + visibility := resourceImagesImageV2VisibilityFromString(d.Get("visibility").(string)) + v := images.UpdateVisibility{Visibility: visibility} + updateOpts = append(updateOpts, v) + } + + if d.HasChange("name") { + v := images.ReplaceImageName{NewName: d.Get("name").(string)} + updateOpts = append(updateOpts, v) + } + + if d.HasChange("tags") { + tags := d.Get("tags").(*schema.Set).List() + v := images.ReplaceImageTags{ + NewTags: resourceImagesImageV2BuildTags(tags), + } + updateOpts = append(updateOpts, v) + } + + log.Printf("[DEBUG] Update Options: %#v", updateOpts) + + _, err = images.Update(imageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating image: %s", err) + } + + return resourceImagesImageV2Read(d, meta) +} + +func resourceImagesImageV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + imageClient, err := config.imageV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack image client: %s", err) + } + + log.Printf("[DEBUG] Deleting Image %s", d.Id()) + if err := images.Delete(imageClient, d.Id()).Err; err != nil { + return fmt.Errorf("Error deleting Image: 
%s", err) + } + + d.SetId("") + return nil +} + +func resourceImagesImageV2ValidateVisibility(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + validVisibilities := []string{ + "public", + "private", + "shared", + "community", + } + + for _, v := range validVisibilities { + if value == v { + return + } + } + + err := fmt.Errorf("%s must be one of %s", k, validVisibilities) + errors = append(errors, err) + return +} + +func validatePositiveInt(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value > 0 { + return + } + errors = append(errors, fmt.Errorf("%q must be a positive integer", k)) + return +} + +var DiskFormats = [9]string{"ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vdi", "iso"} + +func resourceImagesImageV2ValidateDiskFormat(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + for i := range DiskFormats { + if value == DiskFormats[i] { + return + } + } + errors = append(errors, fmt.Errorf("%q must be one of %v", k, DiskFormats)) + return +} + +var ContainerFormats = [5]string{"ami", "ari", "aki", "bare", "ovf"} + +func resourceImagesImageV2ValidateContainerFormat(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + for i := range ContainerFormats { + if value == ContainerFormats[i] { + return + } + } + errors = append(errors, fmt.Errorf("%q must be one of %v", k, ContainerFormats)) + return +} + +func resourceImagesImageV2VisibilityFromString(v string) images.ImageVisibility { + switch v { + case "public": + return images.ImageVisibilityPublic + case "private": + return images.ImageVisibilityPrivate + case "shared": + return images.ImageVisibilityShared + case "community": + return images.ImageVisibilityCommunity + } + + return "" +} + +func fileMD5Checksum(f *os.File) (string, error) { + hash := md5.New() + if _, err := io.Copy(hash, f); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func resourceImagesImageV2FileProps(filename string) (int64, string, error) { + var filesize int64 + var filechecksum string + + file, err := os.Open(filename) + if err != nil { + return -1, "", fmt.Errorf("Error opening file for Image: %s", err) + + } + defer file.Close() + + fstat, err := file.Stat() + if err != nil { + return -1, "", fmt.Errorf("Error reading image file %q: %s", file.Name(), err) + } + + filesize = fstat.Size() + filechecksum, err = fileMD5Checksum(file) + + if err != nil { + return -1, "", fmt.Errorf("Error computing image file %q checksum: %s", file.Name(), err) + } + + return filesize, filechecksum, nil +} + +func resourceImagesImageV2File(d *schema.ResourceData) (string, error) { + if filename := d.Get("local_file_path").(string); filename != "" { + return filename, nil + } else if furl := d.Get("image_source_url").(string); furl != "" { + dir := d.Get("image_cache_path").(string) + os.MkdirAll(dir, 0700) + filename := filepath.Join(dir, fmt.Sprintf("%x.img", md5.Sum([]byte(furl)))) + + if _, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error while trying to access file %q: %s", filename, err) + } + log.Printf("[DEBUG] File %s doesn't exist; 
will download from %s", filename, furl) + file, err := os.Create(filename) + if err != nil { + return "", fmt.Errorf("Error creating file %q: %s", filename, err) + } + defer file.Close() + resp, err := http.Get(furl) + if err != nil { + return "", fmt.Errorf("Error downloading image from %q", furl) + } + defer resp.Body.Close() + + if _, err = io.Copy(file, resp.Body); err != nil { + return "", fmt.Errorf("Error downloading image %q to file %q: %s", furl, filename, err) + } + return filename, nil + } else { + log.Printf("[DEBUG] File exists %s", filename) + return filename, nil + } + } else { + return "", fmt.Errorf("Error in config. no file specified") + } +} + +func resourceImagesImageV2RefreshFunc(client *gophercloud.ServiceClient, id string, fileSize int64, checksum string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + img, err := images.Get(client, id).Extract() + if err != nil { + return nil, "", err + } + log.Printf("[DEBUG] OpenStack image status is: %s", img.Status) + + if img.Checksum != checksum || int64(img.SizeBytes) != fileSize { + return img, fmt.Sprintf("%s", img.Status), fmt.Errorf("Error wrong size %v or checksum %q", img.SizeBytes, img.Checksum) + } + + return img, fmt.Sprintf("%s", img.Status), nil + } +} + +func resourceImagesImageV2BuildTags(v []interface{}) []string { + var tags []string + for _, tag := range v { + tags = append(tags, tag.(string)) + } + + return tags +} + +func resourceImagesImageV2ExpandProperties(v map[string]interface{}) map[string]string { + properties := map[string]string{} + for key, value := range v { + if v, ok := value.(string); ok { + properties[key] = v + } + } + + return properties +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go new file mode 100644 index 00000000000..754d0b621ff --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_listener_v2.go @@ -0,0 +1,314 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" +) + +func resourceListenerV2() *schema.Resource { + return &schema.Resource{ + Create: resourceListenerV2Create, + Read: resourceListenerV2Read, + Update: resourceListenerV2Update, + Delete: resourceListenerV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "TCP" && value != "HTTP" && value != "HTTPS" && value != "TERMINATED_HTTPS" { + errors = append(errors, fmt.Errorf( + "Only 'TCP', 'HTTP', 'HTTPS' and 'TERMINATED_HTTPS' are supported values for 'protocol'")) + } + return + }, + }, + + "protocol_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "tenant_id": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "loadbalancer_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "default_pool_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "connection_limit": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "default_tls_container_ref": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "sni_container_refs": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + }, + } +} + +func resourceListenerV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + var sniContainerRefs []string + if raw, ok := d.GetOk("sni_container_refs"); ok { + for _, v := range raw.([]interface{}) { + sniContainerRefs = append(sniContainerRefs, v.(string)) + } + } + createOpts := listeners.CreateOpts{ + Protocol: listeners.Protocol(d.Get("protocol").(string)), + ProtocolPort: d.Get("protocol_port").(int), + TenantID: d.Get("tenant_id").(string), + LoadbalancerID: d.Get("loadbalancer_id").(string), + Name: d.Get("name").(string), + DefaultPoolID: d.Get("default_pool_id").(string), + Description: d.Get("description").(string), + DefaultTlsContainerRef: d.Get("default_tls_container_ref").(string), + SniContainerRefs: sniContainerRefs, + AdminStateUp: &adminStateUp, + } + + if v, ok := d.GetOk("connection_limit"); ok { + connectionLimit := v.(int) + createOpts.ConnLimit = &connectionLimit + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + // Wait for LoadBalancer to become active before continuing + lbID := createOpts.LoadbalancerID + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Attempting to create listener") + var listener *listeners.Listener + err = resource.Retry(timeout, func() *resource.RetryError { + listener, err = listeners.Create(networkingClient, createOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("Error creating listener: %s", err) + } + + // Wait for LoadBalancer to become active again before continuing + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + d.SetId(listener.ID) + + return resourceListenerV2Read(d, meta) +} + +func resourceListenerV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + listener, err := listeners.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "listener") + } + + log.Printf("[DEBUG] Retrieved listener %s: %#v", d.Id(), listener) 
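+ // Mirror every listener attribute back into state so out-of-band changes + // surface as diffs on the next plan.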
+ + d.Set("name", listener.Name) + d.Set("protocol", listener.Protocol) + d.Set("tenant_id", listener.TenantID) + d.Set("description", listener.Description) + d.Set("protocol_port", listener.ProtocolPort) + d.Set("admin_state_up", listener.AdminStateUp) + d.Set("default_pool_id", listener.DefaultPoolID) + d.Set("connection_limit", listener.ConnLimit) + d.Set("sni_container_refs", listener.SniContainerRefs) + d.Set("default_tls_container_ref", listener.DefaultTlsContainerRef) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceListenerV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts listeners.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + if d.HasChange("connection_limit") { + connLimit := d.Get("connection_limit").(int) + updateOpts.ConnLimit = &connLimit + } + if d.HasChange("default_tls_container_ref") { + updateOpts.DefaultTlsContainerRef = d.Get("default_tls_container_ref").(string) + } + if d.HasChange("sni_container_refs") { + var sniContainerRefs []string + if raw, ok := d.GetOk("sni_container_refs"); ok { + for _, v := range raw.([]interface{}) { + sniContainerRefs = append(sniContainerRefs, v.(string)) + } + } + updateOpts.SniContainerRefs = sniContainerRefs + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + // Wait for LoadBalancer to become active before continuing + lbID := d.Get("loadbalancer_id").(string) + timeout := d.Timeout(schema.TimeoutUpdate) + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating listener %s with options: %#v", d.Id(), updateOpts) + err = resource.Retry(timeout, func() *resource.RetryError { + _, err = listeners.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error updating listener %s: %s", d.Id(), err) + } + + // Wait for LoadBalancer to become active again before continuing + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + return resourceListenerV2Read(d, meta) + +} + +func resourceListenerV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Wait for LoadBalancer to become active before continuing + lbID := d.Get("loadbalancer_id").(string) + timeout := d.Timeout(schema.TimeoutDelete) + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting listener %s", d.Id()) + err = resource.Retry(timeout, func() *resource.RetryError { + err = listeners.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error deleting listener %s: %s", d.Id(), err) + } + + // Wait for LoadBalancer to become active again before continuing + 
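+ // LBaaS v2 serializes changes through the load balancer's provisioning + // status; the deleted listener is only confirmed gone once the parent + // balancer settles back to ACTIVE.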
err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + // Wait for Listener to delete + err = waitForLBV2Listener(networkingClient, d.Id(), "DELETED", nil, timeout) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go new file mode 100644 index 00000000000..af715680dfe --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_loadbalancer_v2.go @@ -0,0 +1,287 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func resourceLoadBalancerV2() *schema.Resource { + return &schema.Resource{ + Create: resourceLoadBalancerV2Create, + Read: resourceLoadBalancerV2Read, + Update: resourceLoadBalancerV2Update, + Delete: resourceLoadBalancerV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "vip_subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "vip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "vip_port_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + "flavor": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "loadbalancer_provider": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceLoadBalancerV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var lbProvider string + if v, ok := d.GetOk("loadbalancer_provider"); ok { + lbProvider = v.(string) + } + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts := loadbalancers.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + VipSubnetID: d.Get("vip_subnet_id").(string), + TenantID: d.Get("tenant_id").(string), + VipAddress: d.Get("vip_address").(string), + AdminStateUp: &adminStateUp, + Flavor: 
d.Get("flavor").(string), + Provider: lbProvider, + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + lb, err := loadbalancers.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating LoadBalancer: %s", err) + } + + // Wait for LoadBalancer to become active before continuing + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForLBV2LoadBalancer(networkingClient, lb.ID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + + // Once the loadbalancer has been created, apply any requested security groups + // to the port that was created behind the scenes. + if err := resourceLoadBalancerV2SecurityGroups(networkingClient, lb.VipPortID, d); err != nil { + return err + } + + // If all has been successful, set the ID on the resource + d.SetId(lb.ID) + + return resourceLoadBalancerV2Read(d, meta) +} + +func resourceLoadBalancerV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + lb, err := loadbalancers.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "loadbalancer") + } + + log.Printf("[DEBUG] Retrieved loadbalancer %s: %#v", d.Id(), lb) + + d.Set("name", lb.Name) + d.Set("description", lb.Description) + d.Set("vip_subnet_id", lb.VipSubnetID) + d.Set("tenant_id", lb.TenantID) + d.Set("vip_address", lb.VipAddress) + d.Set("vip_port_id", lb.VipPortID) + d.Set("admin_state_up", lb.AdminStateUp) + d.Set("flavor", lb.Flavor) + d.Set("loadbalancer_provider", lb.Provider) + d.Set("region", GetRegion(d, config)) + + // Get any security groups on the VIP Port + if lb.VipPortID != "" { + port, err := ports.Get(networkingClient, lb.VipPortID).Extract() + if err != nil { + return err + } + + d.Set("security_group_ids", port.SecurityGroups) + } + + return nil +} + +func resourceLoadBalancerV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts loadbalancers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + // Wait for LoadBalancer to become active before continuing + timeout := d.Timeout(schema.TimeoutUpdate) + err = waitForLBV2LoadBalancer(networkingClient, d.Id(), "ACTIVE", nil, timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating loadbalancer %s with options: %#v", d.Id(), updateOpts) + err = resource.Retry(timeout, func() *resource.RetryError { + _, err = loadbalancers.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error updating loadbalancer %s: %s", d.Id(), err) + } + + // Wait for LoadBalancer to become active again before continuing + err = waitForLBV2LoadBalancer(networkingClient, d.Id(), "ACTIVE", nil, timeout) + if err != nil { + return err + } + + // Security Groups get updated separately + if d.HasChange("security_group_ids") { + vipPortID := d.Get("vip_port_id").(string) + if err := resourceLoadBalancerV2SecurityGroups(networkingClient, vipPortID, d); err != nil { + return err
+ } + } + + return resourceLoadBalancerV2Read(d, meta) +} + +func resourceLoadBalancerV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + log.Printf("[DEBUG] Deleting loadbalancer %s", d.Id()) + timeout := d.Timeout(schema.TimeoutDelete) + err = resource.Retry(timeout, func() *resource.RetryError { + err = loadbalancers.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + // Wait for the LoadBalancer to be deleted + pending := []string{"PENDING_UPDATE", "PENDING_DELETE", "ACTIVE"} + err = waitForLBV2LoadBalancer(networkingClient, d.Id(), "DELETED", pending, timeout) + if err != nil { + return err + } + + return nil +} + +func resourceLoadBalancerV2SecurityGroups(networkingClient *gophercloud.ServiceClient, vipPortID string, d *schema.ResourceData) error { + if vipPortID != "" { + if v, ok := d.GetOk("security_group_ids"); ok { + securityGroups := resourcePortSecurityGroupsV2(v.(*schema.Set)) + updateOpts := ports.UpdateOpts{ + SecurityGroups: &securityGroups, + } + + log.Printf("[DEBUG] Adding security groups to loadbalancer "+ + "VIP Port %s: %#v", vipPortID, updateOpts) + + _, err := ports.Update(networkingClient, vipPortID, updateOpts).Extract() + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go new file mode 100644 index 00000000000..5b203371fef --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v1.go @@ -0,0 +1,236 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" +) + +func resourceLBMemberV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBMemberV1Create, + Read: resourceLBMemberV1Read, + Update: resourceLBMemberV1Update, + Delete: resourceLBMemberV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "weight": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + Computed: true, + }, + }, + } +} + +func resourceLBMemberV1Create(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := members.CreateOpts{ + TenantID: d.Get("tenant_id").(string), + PoolID: d.Get("pool_id").(string), + Address: d.Get("address").(string), + ProtocolPort: d.Get("port").(int), + } + + log.Printf("[DEBUG] OpenStack LB Member Create Options: %#v", createOpts) + m, err := members.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB member: %s", err) + } + log.Printf("[INFO] LB member ID: %s", m.ID) + + log.Printf("[DEBUG] Waiting for OpenStack LB member (%s) to become available.", m.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE", "INACTIVE", "CREATED", "DOWN"}, + Refresh: waitForLBMemberActive(networkingClient, m.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(m.ID) + + // Due to the way Gophercloud is currently set up, AdminStateUp must be set post-create + asu := d.Get("admin_state_up").(bool) + updateOpts := members.UpdateOpts{ + AdminStateUp: &asu, + } + + log.Printf("[DEBUG] OpenStack LB Member Update Options: %#v", updateOpts) + m, err = members.Update(networkingClient, m.ID, updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB member: %s", err) + } + + return resourceLBMemberV1Read(d, meta) +} + +func resourceLBMemberV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + m, err := members.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB member") + } + + log.Printf("[DEBUG] Retrieved OpenStack LB member %s: %+v", d.Id(), m) + + d.Set("address", m.Address) + d.Set("pool_id", m.PoolID) + d.Set("port", m.ProtocolPort) + d.Set("weight", m.Weight) + d.Set("admin_state_up", m.AdminStateUp) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceLBMemberV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts members.UpdateOpts + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Updating LB member %s with options: %+v", d.Id(), updateOpts) + + _, err = members.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB member: %s", err) + } + + return resourceLBMemberV1Read(d, meta) +} + +func resourceLBMemberV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + err = members.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return CheckDeleted(d, err, "LB member") + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", 
"PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForLBMemberDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB member: %s", err) + } + + d.SetId("") + return nil +} + +func waitForLBMemberActive(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + m, err := members.Get(networkingClient, memberId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LB member: %+v", m) + if m.Status == "ACTIVE" { + return m, "ACTIVE", nil + } + + return m, m.Status, nil + } +} + +func waitForLBMemberDelete(networkingClient *gophercloud.ServiceClient, memberId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB member %s", memberId) + + m, err := members.Get(networkingClient, memberId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB member %s", memberId) + return m, "DELETED", nil + } + return m, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB member %s still active.", memberId) + return m, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go new file mode 100644 index 00000000000..13ea2b27af2 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_member_v2.go @@ -0,0 +1,256 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func resourceMemberV2() *schema.Resource { + return &schema.Resource{ + Create: resourceMemberV2Create, + Read: resourceMemberV2Read, + Update: resourceMemberV2Update, + Delete: resourceMemberV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "protocol_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "weight": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 1 { + errors = append(errors, fmt.Errorf( + "Only numbers greater than 0 are supported values for 'weight'")) + } + return + }, + }, + + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "admin_state_up": &schema.Schema{ + Type: 
schema.TypeBool, + Default: true, + Optional: true, + }, + + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceMemberV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts := pools.CreateMemberOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + Address: d.Get("address").(string), + ProtocolPort: d.Get("protocol_port").(int), + Weight: d.Get("weight").(int), + AdminStateUp: &adminStateUp, + } + + // Must omit if not set + if v, ok := d.GetOk("subnet_id"); ok { + createOpts.SubnetID = v.(string) + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + // Wait for LB to become active before continuing + poolID := d.Get("pool_id").(string) + timeout := d.Timeout(schema.TimeoutCreate) + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Attempting to create member") + var member *pools.Member + err = resource.Retry(timeout, func() *resource.RetryError { + member, err = pools.CreateMember(networkingClient, poolID, createOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating member: %s", err) + } + + // Wait for LB to become ACTIVE again + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + d.SetId(member.ID) + + return resourceMemberV2Read(d, meta) +} + +func resourceMemberV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + member, err := pools.GetMember(networkingClient, d.Get("pool_id").(string), d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "member") + } + + log.Printf("[DEBUG] Retrieved member %s: %#v", d.Id(), member) + + d.Set("name", member.Name) + d.Set("weight", member.Weight) + d.Set("admin_state_up", member.AdminStateUp) + d.Set("tenant_id", member.TenantID) + d.Set("subnet_id", member.SubnetID) + d.Set("address", member.Address) + d.Set("protocol_port", member.ProtocolPort) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceMemberV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts pools.UpdateMemberOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("weight") { + updateOpts.Weight = d.Get("weight").(int) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + // Wait for LB to become active before continuing + poolID := d.Get("pool_id").(string) + timeout := d.Timeout(schema.TimeoutUpdate) + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating member %s with options: %#v", d.Id(), updateOpts) + err = resource.Retry(timeout, func() 
*resource.RetryError { + _, err = pools.UpdateMember(networkingClient, poolID, d.Id(), updateOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Unable to update member %s: %s", d.Id(), err) + } + + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + return resourceMemberV2Read(d, meta) +} + +func resourceMemberV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Wait for Pool to become active before continuing + poolID := d.Get("pool_id").(string) + timeout := d.Timeout(schema.TimeoutDelete) + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Attempting to delete member %s", d.Id()) + err = resource.Retry(timeout, func() *resource.RetryError { + err = pools.DeleteMember(networkingClient, poolID, d.Id()).ExtractErr() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + // Wait for LB to become ACTIVE + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go new file mode 100644 index 00000000000..db6c5b2617f --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v1.go @@ -0,0 +1,310 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" +) + +func resourceLBMonitorV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBMonitorV1Create, + Read: resourceLBMonitorV1Read, + Update: resourceLBMonitorV1Update, + Delete: resourceLBMonitorV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "delay": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "timeout": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "max_retries": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "url_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "expected_codes": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + 
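+ // NOTE: unlike the boolean admin_state_up fields elsewhere in this provider, the attribute below is a string, which appears to be a deliberate tri-state convention: unset (""), "true", or "false". It is parsed with strconv.ParseBool in Create and Update. A minimal sketch of that convention (illustrative only, not part of this file): + // if raw := d.Get("admin_state_up").(string); raw != "" { + // asu, err := strconv.ParseBool(raw) // accepts "true" / "false"; handle err + // createOpts.AdminStateUp = &asu + // }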
"admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + }, + } +} + +func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := monitors.CreateOpts{ + TenantID: d.Get("tenant_id").(string), + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + ExpectedCodes: d.Get("expected_codes").(string), + HTTPMethod: d.Get("http_method").(string), + } + + if v, ok := d.GetOk("type"); ok { + monitorType := resourceLBMonitorV1DetermineType(v.(string)) + createOpts.Type = monitorType + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + m, err := monitors.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB Monitor: %s", err) + } + log.Printf("[INFO] LB Monitor ID: %s", m.ID) + + log.Printf("[DEBUG] Waiting for OpenStack LB Monitor (%s) to become available.", m.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForLBMonitorActive(networkingClient, m.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(m.ID) + + return resourceLBMonitorV1Read(d, meta) +} + +func resourceLBMonitorV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + m, err := monitors.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB monitor") + } + + log.Printf("[DEBUG] Retrieved OpenStack LB Monitor %s: %+v", d.Id(), m) + + d.Set("type", m.Type) + d.Set("delay", m.Delay) + d.Set("timeout", m.Timeout) + d.Set("max_retries", m.MaxRetries) + d.Set("tenant_id", m.TenantID) + d.Set("url_path", m.URLPath) + d.Set("http_method", m.HTTPMethod) + d.Set("expected_codes", m.ExpectedCodes) + d.Set("admin_state_up", strconv.FormatBool(m.AdminStateUp)) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceLBMonitorV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + updateOpts := monitors.UpdateOpts{ + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + HTTPMethod: d.Get("http_method").(string), + ExpectedCodes: d.Get("expected_codes").(string), + } + + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must 
be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + + log.Printf("[DEBUG] Updating OpenStack LB Monitor %s with options: %+v", d.Id(), updateOpts) + + _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB Monitor: %s", err) + } + + return resourceLBMonitorV1Read(d, meta) +} + +func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForLBMonitorDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err) + } + + d.SetId("") + return nil +} + +func resourceLBMonitorV1DetermineType(t string) monitors.MonitorType { + var monitorType monitors.MonitorType + switch t { + case "PING": + monitorType = monitors.TypePING + case "TCP": + monitorType = monitors.TypeTCP + case "HTTP": + monitorType = monitors.TypeHTTP + case "HTTPS": + monitorType = monitors.TypeHTTPS + } + + return monitorType +} + +func waitForLBMonitorActive(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + m, err := monitors.Get(networkingClient, monitorId).Extract() + if err != nil { + return nil, "", err + } + + // The monitor resource has no Status attribute, so a successful Get is the best we can do + log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) + return m, "ACTIVE", nil + } +} + +func waitForLBMonitorDelete(networkingClient *gophercloud.ServiceClient, monitorId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB Monitor %s", monitorId) + + m, err := monitors.Get(networkingClient, monitorId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) + return m, "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) + return m, "PENDING", nil + } + } + + return m, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Monitor: %+v", m) + err = monitors.Delete(networkingClient, monitorId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Monitor %s", monitorId) + return m, "DELETED", nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] OpenStack LB Monitor (%s) is waiting for Pool to delete.", monitorId) + return m, "PENDING", nil + } + } + + return m, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Monitor %s still active.", monitorId) + return m, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go 
b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go new file mode 100644 index 00000000000..96287aa3729 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_monitor_v2.go @@ -0,0 +1,280 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" +) + +func resourceMonitorV2() *schema.Resource { + return &schema.Resource{ + Create: resourceMonitorV2Create, + Read: resourceMonitorV2Read, + Update: resourceMonitorV2Update, + Delete: resourceMonitorV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "delay": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "timeout": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "max_retries": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "url_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "expected_codes": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + }, + } +} + +func resourceMonitorV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + createOpts := monitors.CreateOpts{ + PoolID: d.Get("pool_id").(string), + TenantID: d.Get("tenant_id").(string), + Type: d.Get("type").(string), + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + HTTPMethod: d.Get("http_method").(string), + ExpectedCodes: d.Get("expected_codes").(string), + Name: d.Get("name").(string), + AdminStateUp: &adminStateUp, + } + + timeout := d.Timeout(schema.TimeoutCreate) + poolID := createOpts.PoolID + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + log.Printf("[DEBUG] Attempting to create monitor") + var monitor *monitors.Monitor + err = resource.Retry(timeout, func() *resource.RetryError { + monitor, err = monitors.Create(networkingClient, createOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return 
fmt.Errorf("Unable to create monitor: %s", err) + } + + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + d.SetId(monitor.ID) + + return resourceMonitorV2Read(d, meta) +} + +func resourceMonitorV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + monitor, err := monitors.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "monitor") + } + + log.Printf("[DEBUG] Retrieved monitor %s: %#v", d.Id(), monitor) + + d.Set("tenant_id", monitor.TenantID) + d.Set("type", monitor.Type) + d.Set("delay", monitor.Delay) + d.Set("timeout", monitor.Timeout) + d.Set("max_retries", monitor.MaxRetries) + d.Set("url_path", monitor.URLPath) + d.Set("http_method", monitor.HTTPMethod) + d.Set("expected_codes", monitor.ExpectedCodes) + d.Set("admin_state_up", monitor.AdminStateUp) + d.Set("name", monitor.Name) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceMonitorV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts monitors.UpdateOpts + if d.HasChange("url_path") { + updateOpts.URLPath = d.Get("url_path").(string) + } + if d.HasChange("expected_codes") { + updateOpts.ExpectedCodes = d.Get("expected_codes").(string) + } + if d.HasChange("delay") { + updateOpts.Delay = d.Get("delay").(int) + } + if d.HasChange("timeout") { + updateOpts.Timeout = d.Get("timeout").(int) + } + if d.HasChange("max_retries") { + updateOpts.MaxRetries = d.Get("max_retries").(int) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("http_method") { + updateOpts.HTTPMethod = d.Get("http_method").(string) + } + + log.Printf("[DEBUG] Updating monitor %s with options: %#v", d.Id(), updateOpts) + timeout := d.Timeout(schema.TimeoutUpdate) + poolID := d.Get("pool_id").(string) + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + err = resource.Retry(timeout, func() *resource.RetryError { + _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Unable to update monitor %s: %s", d.Id(), err) + } + + // Wait for LB to become active before continuing + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + return resourceMonitorV2Read(d, meta) +} + +func resourceMonitorV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + log.Printf("[DEBUG] Deleting monitor %s", d.Id()) + timeout := d.Timeout(schema.TimeoutDelete) + poolID := d.Get("pool_id").(string) + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + err = resource.Retry(timeout, func()
*resource.RetryError { + err = monitors.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Unable to delete monitor %s: %s", d.Id(), err) + } + + err = waitForLBV2viaPool(networkingClient, poolID, "ACTIVE", timeout) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go new file mode 100644 index 00000000000..66ed273c39d --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v1.go @@ -0,0 +1,344 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" +) + +func resourceLBPoolV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBPoolV1Create, + Read: resourceLBPoolV1Read, + Update: resourceLBPoolV1Update, + Delete: resourceLBPoolV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "lb_method": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "lb_provider": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "member": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Removed: "Use openstack_lb_member_v1 instead.", + }, + "monitor_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := pools.CreateOpts{ + Name: d.Get("name").(string), + SubnetID: d.Get("subnet_id").(string), + TenantID: d.Get("tenant_id").(string), + Provider: d.Get("lb_provider").(string), + } + + if v, ok := d.GetOk("protocol"); ok { + protocol := resourceLBPoolV1DetermineProtocol(v.(string)) + createOpts.Protocol = protocol + } + + if v, ok := d.GetOk("lb_method"); ok { + lbMethod := resourceLBPoolV1DetermineLBMethod(v.(string)) + createOpts.LBMethod = lbMethod + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + p, err := pools.Create(networkingClient, 
createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB pool: %s", err) + } + log.Printf("[INFO] LB Pool ID: %s", p.ID) + + log.Printf("[DEBUG] Waiting for OpenStack LB pool (%s) to become available.", p.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForLBPoolActive(networkingClient, p.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + d.SetId(p.ID) + + if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil { + for _, mID := range mIDs { + _, err := pools.AssociateMonitor(networkingClient, p.ID, mID).Extract() + if err != nil { + return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", mID, p.ID, err) + } + } + } + + return resourceLBPoolV1Read(d, meta) +} + +func resourceLBPoolV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + p, err := pools.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB pool") + } + + log.Printf("[DEBUG] Retrieved OpenStack LB Pool %s: %+v", d.Id(), p) + + d.Set("name", p.Name) + d.Set("protocol", p.Protocol) + d.Set("subnet_id", p.SubnetID) + d.Set("lb_method", p.LBMethod) + d.Set("lb_provider", p.Provider) + d.Set("tenant_id", p.TenantID) + d.Set("monitor_ids", p.MonitorIDs) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceLBPoolV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts pools.UpdateOpts + // If either option changed, update both. + // Gophercloud complains if one is empty. 
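+ // For example, a name-only change still sends the pool's current lb_method; a sketch with placeholder values (illustrative, not output from this provider): + // updateOpts := pools.UpdateOpts{ + // Name: "renamed-pool", + // LBMethod: pools.LBMethodRoundRobin, + // }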
+ if d.HasChange("name") || d.HasChange("lb_method") { + updateOpts.Name = d.Get("name").(string) + + lbMethod := resourceLBPoolV1DetermineLBMethod(d.Get("lb_method").(string)) + updateOpts.LBMethod = lbMethod + } + + log.Printf("[DEBUG] Updating OpenStack LB Pool %s with options: %+v", d.Id(), updateOpts) + + _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB Pool: %s", err) + } + + if d.HasChange("monitor_ids") { + oldMIDsRaw, newMIDsRaw := d.GetChange("monitor_ids") + oldMIDsSet, newMIDsSet := oldMIDsRaw.(*schema.Set), newMIDsRaw.(*schema.Set) + monitorsToAdd := newMIDsSet.Difference(oldMIDsSet) + monitorsToRemove := oldMIDsSet.Difference(newMIDsSet) + + log.Printf("[DEBUG] Monitors to add: %v", monitorsToAdd) + + log.Printf("[DEBUG] Monitors to remove: %v", monitorsToRemove) + + for _, m := range monitorsToAdd.List() { + _, err := pools.AssociateMonitor(networkingClient, d.Id(), m.(string)).Extract() + if err != nil { + return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", m.(string), d.Id(), err) + } + log.Printf("[DEBUG] Associated monitor (%s) with pool (%s)", m.(string), d.Id()) + } + + for _, m := range monitorsToRemove.List() { + _, err := pools.DisassociateMonitor(networkingClient, d.Id(), m.(string)).Extract() + if err != nil { + return fmt.Errorf("Error disassociating monitor (%s) from OpenStack LB pool (%s): %s", m.(string), d.Id(), err) + } + log.Printf("[DEBUG] Disassociated monitor (%s) from pool (%s)", m.(string), d.Id()) + } + } + + return resourceLBPoolV1Read(d, meta) +} + +func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Make sure all monitors are disassociated first + if v, ok := d.GetOk("monitor_ids"); ok { + if monitorIDSet, ok := v.(*schema.Set); ok { + for _, monitorID := range monitorIDSet.List() { + mID := monitorID.(string) + log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id()) + if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil { + return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), res.Err) + } + } + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForLBPoolDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err) + } + + d.SetId("") + return nil +} + +func resourcePoolMonitorIDsV1(d *schema.ResourceData) []string { + mIDsRaw := d.Get("monitor_ids").(*schema.Set) + mIDs := make([]string, mIDsRaw.Len()) + for i, raw := range mIDsRaw.List() { + mIDs[i] = raw.(string) + } + return mIDs +} + +func resourceLBPoolV1DetermineProtocol(v string) pools.LBProtocol { + var protocol pools.LBProtocol + switch v { + case "TCP": + protocol = pools.ProtocolTCP + case "HTTP": + protocol = pools.ProtocolHTTP + case "HTTPS": + protocol = pools.ProtocolHTTPS + } + + return protocol +} + +func resourceLBPoolV1DetermineLBMethod(v string) pools.LBMethod { + var lbMethod pools.LBMethod + switch v { + case "ROUND_ROBIN": + lbMethod = pools.LBMethodRoundRobin
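+ // NOTE: inputs outside the cases handled by this switch fall through and return the zero-value pools.LBMethod, leaving final validation to the Neutron API; e.g. resourceLBPoolV1DetermineLBMethod("ROUND_ROBIN") yields pools.LBMethodRoundRobin.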
case "LEAST_CONNECTIONS": + lbMethod = pools.LBMethodLeastConnections + } + + return lbMethod +} + +func waitForLBPoolActive(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := pools.Get(networkingClient, poolId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) + if p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForLBPoolDelete(networkingClient *gophercloud.ServiceClient, poolId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB Pool %s", poolId) + + p, err := pools.Get(networkingClient, poolId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Pool: %+v", p) + err = pools.Delete(networkingClient, poolId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB Pool %s", poolId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB Pool %s still active.", poolId) + return p, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go new file mode 100644 index 00000000000..f37542cc22a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_pool_v2.go @@ -0,0 +1,350 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func resourcePoolV2() *schema.Resource { + return &schema.Resource{ + Create: resourcePoolV2Create, + Read: resourcePoolV2Read, + Update: resourcePoolV2Update, + Delete: resourcePoolV2Delete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "TCP" && value != "HTTP" && value != "HTTPS" { + errors = append(errors, fmt.Errorf( + "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) + } + return + }, + }, + + // One of loadbalancer_id or listener_id must be provided + "loadbalancer_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // One of loadbalancer_id or listener_id must be 
provided + "listener_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "lb_method": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "ROUND_ROBIN" && value != "LEAST_CONNECTIONS" && value != "SOURCE_IP" { + errors = append(errors, fmt.Errorf( + "Only 'ROUND_ROBIN', 'LEAST_CONNECTIONS', and 'SOURCE_IP' are supported values for 'lb_method'")) + } + return + }, + }, + + "persistence": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "SOURCE_IP" && value != "HTTP_COOKIE" && value != "APP_COOKIE" { + errors = append(errors, fmt.Errorf( + "Only 'SOURCE_IP', 'HTTP_COOKIE', and 'APP_COOKIE' are supported values for 'persistence'")) + } + return + }, + }, + + "cookie_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + }, + } +} + +func resourcePoolV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + var persistence pools.SessionPersistence + if p, ok := d.GetOk("persistence"); ok { + pV := (p.([]interface{}))[0].(map[string]interface{}) + + persistence = pools.SessionPersistence{ + Type: pV["type"].(string), + } + + if persistence.Type == "APP_COOKIE" { + if pV["cookie_name"].(string) == "" { + return fmt.Errorf( + "Persistence cookie_name needs to be set if using 'APP_COOKIE' persistence type.") + } else { + persistence.CookieName = pV["cookie_name"].(string) + } + } else { + if pV["cookie_name"].(string) != "" { + return fmt.Errorf( + "Persistence cookie_name can only be set if using 'APP_COOKIE' persistence type.") + } + } + } + + createOpts := pools.CreateOpts{ + TenantID: d.Get("tenant_id").(string), + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Protocol: pools.Protocol(d.Get("protocol").(string)), + LoadbalancerID: d.Get("loadbalancer_id").(string), + ListenerID: d.Get("listener_id").(string), + LBMethod: pools.LBMethod(d.Get("lb_method").(string)), + AdminStateUp: &adminStateUp, + } + + // Must omit if not set + if persistence != (pools.SessionPersistence{}) { + createOpts.Persistence = &persistence + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + + // Wait for LoadBalancer to become active before continuing + timeout := d.Timeout(schema.TimeoutCreate) + lbID := createOpts.LoadbalancerID + listenerID := createOpts.ListenerID + if lbID != "" { + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + } else if listenerID != "" { + // Wait for Listener to become active before continuing + err = waitForLBV2Listener(networkingClient, listenerID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + } + + log.Printf("[DEBUG] Attempting to create pool") + var pool *pools.Pool + err = resource.Retry(timeout, func() 
*resource.RetryError { + pool, err = pools.Create(networkingClient, createOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error creating pool: %s", err) + } + + // Wait for LoadBalancer to become active before continuing + if lbID != "" { + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + } else { + // Pool exists by now so we can ask for lbID + err = waitForLBV2viaPool(networkingClient, pool.ID, "ACTIVE", timeout) + } + if err != nil { + return err + } + + d.SetId(pool.ID) + + return resourcePoolV2Read(d, meta) +} + +func resourcePoolV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + pool, err := pools.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "pool") + } + + log.Printf("[DEBUG] Retrieved pool %s: %#v", d.Id(), pool) + + d.Set("lb_method", pool.LBMethod) + d.Set("protocol", pool.Protocol) + d.Set("description", pool.Description) + d.Set("tenant_id", pool.TenantID) + d.Set("admin_state_up", pool.AdminStateUp) + d.Set("name", pool.Name) + d.Set("persistence", pool.Persistence) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourcePoolV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts pools.UpdateOpts + if d.HasChange("lb_method") { + updateOpts.LBMethod = pools.LBMethod(d.Get("lb_method").(string)) + } + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + // Wait for LoadBalancer to become active before continuing + timeout := d.Timeout(schema.TimeoutUpdate) + lbID := d.Get("loadbalancer_id").(string) + if lbID != "" { + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + } else { + err = waitForLBV2viaPool(networkingClient, d.Id(), "ACTIVE", timeout) + } + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating pool %s with options: %#v", d.Id(), updateOpts) + err = resource.Retry(timeout, func() *resource.RetryError { + _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Unable to update pool %s: %s", d.Id(), err) + } + + // Wait for LoadBalancer to become active before continuing + if lbID != "" { + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + } else { + err = waitForLBV2viaPool(networkingClient, d.Id(), "ACTIVE", timeout) + } + if err != nil { + return err + } + + return resourcePoolV2Read(d, meta) +} + +func resourcePoolV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Wait for LoadBalancer to become active before continuing + timeout 
:= d.Timeout(schema.TimeoutDelete) + lbID := d.Get("loadbalancer_id").(string) + if lbID != "" { + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + if err != nil { + return err + } + } + + log.Printf("[DEBUG] Attempting to delete pool %s", d.Id()) + err = resource.Retry(timeout, func() *resource.RetryError { + err = pools.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return checkForRetryableError(err) + } + return nil + }) + + if lbID != "" { + err = waitForLBV2LoadBalancer(networkingClient, lbID, "ACTIVE", nil, timeout) + } else { + // Wait for Pool to delete + err = waitForLBV2Pool(networkingClient, d.Id(), "DELETED", nil, timeout) + } + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go new file mode 100644 index 00000000000..39acb5f4816 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_lb_vip_v1.go @@ -0,0 +1,401 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceLBVipV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBVipV1Create, + Read: resourceLBVipV1Read, + Update: resourceLBVipV1Update, + Delete: resourceLBVipV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: false, + }, + "persistence": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + "conn_limit": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: false, + }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + ForceNew: false, + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: false, + }, + }, + } +} + +func resourceLBVipV1Create(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := vips.CreateOpts{ + Name: d.Get("name").(string), + SubnetID: d.Get("subnet_id").(string), + Protocol: d.Get("protocol").(string), + ProtocolPort: d.Get("port").(int), + PoolID: d.Get("pool_id").(string), + TenantID: d.Get("tenant_id").(string), + Address: d.Get("address").(string), + Description: d.Get("description").(string), + Persistence: resourceVipPersistenceV1(d), + ConnLimit: gophercloud.MaybeInt(d.Get("conn_limit").(int)), + } + + asu := d.Get("admin_state_up").(bool) + createOpts.AdminStateUp = &asu + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + p, err := vips.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB VIP: %s", err) + } + log.Printf("[INFO] LB VIP ID: %s", p.ID) + + log.Printf("[DEBUG] Waiting for OpenStack LB VIP (%s) to become available.", p.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForLBVIPActive(networkingClient, p.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return err + } + + floatingIP := d.Get("floating_ip").(string) + if floatingIP != "" { + if err := lbVipV1AssignFloatingIP(floatingIP, p.PortID, networkingClient); err != nil { + return fmt.Errorf("Error assigning floating IP to OpenStack LB VIP: %s", err) + } + } + + d.SetId(p.ID) + + return resourceLBVipV1Read(d, meta) +} + +func resourceLBVipV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + p, err := vips.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB VIP") + } + + log.Printf("[DEBUG] Retrieved OpenStack LB VIP %s: %+v", d.Id(), p) + + d.Set("name", p.Name) + d.Set("subnet_id", p.SubnetID) + d.Set("protocol", p.Protocol) + d.Set("port", p.ProtocolPort) + d.Set("pool_id", p.PoolID) + d.Set("port_id", p.PortID) + d.Set("tenant_id", p.TenantID) + d.Set("address", p.Address) + d.Set("description", p.Description) + d.Set("conn_limit", p.ConnLimit) + d.Set("admin_state_up", p.AdminStateUp) + + // Set the persistence method being used + persistence := make(map[string]interface{}) + if p.Persistence.Type != "" { + persistence["type"] = p.Persistence.Type + } + if p.Persistence.CookieName != "" { + persistence["cookie_name"] = p.Persistence.CookieName + } + d.Set("persistence", persistence) + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceLBVipV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts vips.UpdateOpts + if d.HasChange("name") { + v := d.Get("name").(string) + updateOpts.Name = &v + } + + if d.HasChange("pool_id") { + v := d.Get("pool_id").(string) + updateOpts.PoolID = &v + } + + if d.HasChange("description") { + v := d.Get("description").(string) + updateOpts.Description = &v + } + + if d.HasChange("conn_limit") { + updateOpts.ConnLimit =
gophercloud.MaybeInt(d.Get("conn_limit").(int)) + } + + if d.HasChange("floating_ip") { + portID := d.Get("port_id").(string) + + // Searching for a floating IP assigned to the VIP + listOpts := floatingips.ListOpts{ + PortID: portID, + } + page, err := floatingips.List(networkingClient, listOpts).AllPages() + if err != nil { + return err + } + + fips, err := floatingips.ExtractFloatingIPs(page) + if err != nil { + return err + } + + // If a floating IP is found we unassign it + if len(fips) == 1 { + portID := "" + updateOpts := floatingips.UpdateOpts{ + PortID: &portID, + } + if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil { + return err + } + } + + // Assign the updated floating IP + floatingIP := d.Get("floating_ip").(string) + if floatingIP != "" { + if err := lbVipV1AssignFloatingIP(floatingIP, portID, networkingClient); err != nil { + return fmt.Errorf("Error assigning floating IP to OpenStack LB VIP: %s", err) + } + } + } + + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + + // Persistence has to be included, even if it hasn't changed. + updateOpts.Persistence = resourceVipPersistenceV1(d) + + log.Printf("[DEBUG] Updating OpenStack LB VIP %s with options: %+v", d.Id(), updateOpts) + + _, err = vips.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB VIP: %s", err) + } + + return resourceLBVipV1Read(d, meta) +} + +func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE", "PENDING_DELETE"}, + Target: []string{"DELETED"}, + Refresh: waitForLBVIPDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err) + } + + d.SetId("") + return nil +} + +func resourceVipPersistenceV1(d *schema.ResourceData) *vips.SessionPersistence { + rawP := d.Get("persistence").(interface{}) + rawMap := rawP.(map[string]interface{}) + if len(rawMap) != 0 { + p := vips.SessionPersistence{} + if t, ok := rawMap["type"]; ok { + p.Type = t.(string) + } + if c, ok := rawMap["cookie_name"]; ok { + p.CookieName = c.(string) + } + return &p + } + return nil +} + +func lbVipV1AssignFloatingIP(floatingIP, portID string, networkingClient *gophercloud.ServiceClient) error { + log.Printf("[DEBUG] Assigning floating IP %s to VIP %s", floatingIP, portID) + + listOpts := floatingips.ListOpts{ + FloatingIP: floatingIP, + } + page, err := floatingips.List(networkingClient, listOpts).AllPages() + if err != nil { + return err + } + + fips, err := floatingips.ExtractFloatingIPs(page) + if err != nil { + return err + } + if len(fips) != 1 { + return fmt.Errorf("Unable to retrieve floating IP '%s'", floatingIP) + } + + updateOpts := floatingips.UpdateOpts{ + PortID: &portID, + } + if err = floatingips.Update(networkingClient, fips[0].ID, updateOpts).Err; err != nil { + return err + } + + return nil +} + +func waitForLBVIPActive(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := vips.Get(networkingClient, vipId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] 
OpenStack LB VIP: %+v", p) + if p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForLBVIPDelete(networkingClient *gophercloud.ServiceClient, vipId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack LB VIP %s", vipId) + + p, err := vips.Get(networkingClient, vipId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB VIP: %+v", p) + err = vips.Delete(networkingClient, vipId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack LB VIP %s", vipId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack LB VIP %s still active.", vipId) + return p, "ACTIVE", nil + } + +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go new file mode 100644 index 00000000000..8f2291021ae --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_floatingip_v2.go @@ -0,0 +1,298 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/pagination" +) + +func resourceNetworkingFloatingIPV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkFloatingIPV2Create, + Read: resourceNetworkFloatingIPV2Read, + Update: resourceNetworkFloatingIPV2Update, + Delete: resourceNetworkFloatingIPV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "pool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: schema.EnvDefaultFunc("OS_POOL_NAME", nil), + }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + poolID, err := getNetworkID(d, meta, 
d.Get("pool").(string)) + if err != nil { + return fmt.Errorf("Error retrieving floating IP pool name: %s", err) + } + if len(poolID) == 0 { + return fmt.Errorf("No network found with name: %s", d.Get("pool").(string)) + } + createOpts := FloatingIPCreateOpts{ + floatingips.CreateOpts{ + FloatingNetworkID: poolID, + PortID: d.Get("port_id").(string), + TenantID: d.Get("tenant_id").(string), + FixedIP: d.Get("fixed_ip").(string), + }, + MapValueSpecs(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + floatingIP, err := floatingips.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error allocating floating IP: %s", err) + } + + log.Printf("[DEBUG] Waiting for OpenStack Neutron Floating IP (%s) to become available.", floatingIP.ID) + + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Refresh: waitForFloatingIPActive(networkingClient, floatingIP.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(floatingIP.ID) + + return resourceNetworkFloatingIPV2Read(d, meta) +} + +func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + floatingIP, err := floatingips.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "floating IP") + } + + d.Set("address", floatingIP.FloatingIP) + d.Set("port_id", floatingIP.PortID) + d.Set("fixed_ip", floatingIP.FixedIP) + poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) + if err != nil { + return fmt.Errorf("Error retrieving floating IP pool name: %s", err) + } + d.Set("pool", poolName) + d.Set("tenant_id", floatingIP.TenantID) + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkFloatingIPV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + var updateOpts floatingips.UpdateOpts + + if d.HasChange("port_id") { + portID := d.Get("port_id").(string) + updateOpts.PortID = &portID + } + + log.Printf("[DEBUG] Update Options: %#v", updateOpts) + + _, err = floatingips.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating floating IP: %s", err) + } + + return resourceNetworkFloatingIPV2Read(d, meta) +} + +func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForFloatingIPDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Floating IP: %s", err) + } + + d.SetId("") + return nil +} + +func getNetworkID(d *schema.ResourceData, meta interface{}, networkName string) (string, error) { + config := 
meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return "", fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + opts := networks.ListOpts{Name: networkName} + pager := networks.List(networkingClient, opts) + networkID := "" + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := networks.ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.Name == networkName { + networkID = n.ID + return false, nil + } + } + + return true, nil + }) + + return networkID, err +} + +func getNetworkName(d *schema.ResourceData, meta interface{}, networkID string) (string, error) { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return "", fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + opts := networks.ListOpts{ID: networkID} + pager := networks.List(networkingClient, opts) + networkName := "" + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := networks.ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.ID == networkID { + networkName = n.Name + return false, nil + } + } + + return true, nil + }) + + return networkName, err +} + +func waitForFloatingIPActive(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + f, err := floatingips.Get(networkingClient, fId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Floating IP: %+v", f) + if f.Status == "DOWN" || f.Status == "ACTIVE" { + return f, "ACTIVE", nil + } + + return f, "", nil + } +} + +func waitForFloatingIPDelete(networkingClient *gophercloud.ServiceClient, fId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Floating IP %s.\n", fId) + + f, err := floatingips.Get(networkingClient, fId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) + return f, "DELETED", nil + } + return f, "ACTIVE", err + } + + err = floatingips.Delete(networkingClient, fId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Floating IP %s", fId) + return f, "DELETED", nil + } + return f, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Floating IP %s still active.\n", fId) + return f, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go new file mode 100644 index 00000000000..1d24f1cc53a --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_network_v2.go @@ -0,0 +1,326 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +func 
resourceNetworkingNetworkV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingNetworkV2Create, + Read: resourceNetworkingNetworkV2Read, + Update: resourceNetworkingNetworkV2Update, + Delete: resourceNetworkingNetworkV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + "shared": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "segments": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "physical_network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "network_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "segmentation_id": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := NetworkCreateOpts{ + networks.CreateOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + }, + MapValueSpecs(d), + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + sharedRaw := d.Get("shared").(string) + if sharedRaw != "" { + shared, err := strconv.ParseBool(sharedRaw) + if err != nil { + return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) + } + createOpts.Shared = &shared + } + + segments := resourceNetworkingNetworkV2Segments(d) + + n := &networks.Network{} + if len(segments) > 0 { + providerCreateOpts := provider.CreateOptsExt{ + CreateOptsBuilder: createOpts, + Segments: segments, + } + log.Printf("[DEBUG] Create Options: %#v", providerCreateOpts) + n, err = networks.Create(networkingClient, providerCreateOpts).Extract() + } else { + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err = networks.Create(networkingClient, createOpts).Extract() + } + + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) + } + + log.Printf("[INFO] Network ID: %s", n.ID) + + log.Printf("[DEBUG] Waiting for Network (%s) to become available", n.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"BUILD"}, + Target: []string{"ACTIVE"}, + Refresh: waitForNetworkActive(networkingClient, n.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + 
Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(n.ID) + + return resourceNetworkingNetworkV2Read(d, meta) +} + +func resourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := networks.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "network") + } + + log.Printf("[DEBUG] Retrieved Network %s: %+v", d.Id(), n) + + d.Set("name", n.Name) + d.Set("admin_state_up", strconv.FormatBool(n.AdminStateUp)) + d.Set("shared", strconv.FormatBool(n.Shared)) + d.Set("tenant_id", n.TenantID) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingNetworkV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts networks.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + if d.HasChange("shared") { + sharedRaw := d.Get("shared").(string) + if sharedRaw != "" { + shared, err := strconv.ParseBool(sharedRaw) + if err != nil { + return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) + } + updateOpts.Shared = &shared + } + } + + log.Printf("[DEBUG] Updating Network %s with options: %+v", d.Id(), updateOpts) + + _, err = networks.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) + } + + return resourceNetworkingNetworkV2Read(d, meta) +} + +func resourceNetworkingNetworkV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForNetworkDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) + } + + d.SetId("") + return nil +} + +func resourceNetworkingNetworkV2Segments(d *schema.ResourceData) (providerSegments []provider.Segment) { + segments := d.Get("segments").([]interface{}) + for _, v := range segments { + var segment provider.Segment + segmentMap := v.(map[string]interface{}) + + if v, ok := segmentMap["physical_network"].(string); ok { + segment.PhysicalNetwork = v + } + + if v, ok := segmentMap["network_type"].(string); ok { + segment.NetworkType = v + } + + if v, ok := segmentMap["segmentation_id"].(int); ok { + segment.SegmentationID = v + } + + providerSegments = append(providerSegments, segment) + } + return +} + +func waitForNetworkActive(networkingClient 
*gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + n, err := networks.Get(networkingClient, networkId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Network: %+v", n) + if n.Status == "DOWN" || n.Status == "ACTIVE" { + return n, "ACTIVE", nil + } + + return n, n.Status, nil + } +} + +func waitForNetworkDelete(networkingClient *gophercloud.ServiceClient, networkId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Network %s.\n", networkId) + + n, err := networks.Get(networkingClient, networkId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) + return n, "DELETED", nil + } + return n, "ACTIVE", err + } + + err = networks.Delete(networkingClient, networkId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Network %s", networkId) + return n, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return n, "ACTIVE", nil + } + } + return n, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Network %s still active.\n", networkId) + return n, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go new file mode 100644 index 00000000000..4420a563f60 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_port_v2.go @@ -0,0 +1,467 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func resourceNetworkingPortV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingPortV2Create, + Read: resourceNetworkingPortV2Read, + Update: resourceNetworkingPortV2Update, + Delete: resourceNetworkingPortV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "network_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + Computed: true, + }, + "mac_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "device_owner": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "security_group_ids": 
&schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "no_security_groups": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + }, + "device_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "fixed_ip": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "allowed_address_pairs": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Set: allowedAddressPairsHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "mac_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + "all_fixed_ips": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "all_security_group_ids": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceNetworkingPortV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var securityGroups []string + v := d.Get("security_group_ids") + securityGroups = resourcePortSecurityGroupsV2(v.(*schema.Set)) + noSecurityGroups := d.Get("no_security_groups").(bool) + + // Check and make sure an invalid security group configuration wasn't given. + if noSecurityGroups && len(securityGroups) > 0 { + return fmt.Errorf("Cannot have both no_security_groups and security_group_ids set") + } + + createOpts := PortCreateOpts{ + ports.CreateOpts{ + Name: d.Get("name").(string), + AdminStateUp: resourcePortAdminStateUpV2(d), + NetworkID: d.Get("network_id").(string), + MACAddress: d.Get("mac_address").(string), + TenantID: d.Get("tenant_id").(string), + DeviceOwner: d.Get("device_owner").(string), + DeviceID: d.Get("device_id").(string), + FixedIPs: resourcePortFixedIpsV2(d), + AllowedAddressPairs: resourceAllowedAddressPairsV2(d), + }, + MapValueSpecs(d), + } + + if noSecurityGroups { + securityGroups = []string{} + createOpts.SecurityGroups = &securityGroups + } + + // Only set SecurityGroups if one was specified. + // Otherwise this would mimic the no_security_groups action. 
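+	// (An explicit empty list asks Neutron to attach no security groups at all,
+	// while omitting the field entirely lets Neutron fall back to the tenant's
+	// "default" security group.)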
+ if len(securityGroups) > 0 { + createOpts.SecurityGroups = &securityGroups + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + p, err := ports.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) + } + log.Printf("[INFO] Network ID: %s", p.ID) + + log.Printf("[DEBUG] Waiting for OpenStack Neutron Port (%s) to become available.", p.ID) + + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Refresh: waitForNetworkPortActive(networkingClient, p.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(p.ID) + + return resourceNetworkingPortV2Read(d, meta) +} + +func resourceNetworkingPortV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + p, err := ports.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "port") + } + + log.Printf("[DEBUG] Retrieved Port %s: %+v", d.Id(), p) + + d.Set("name", p.Name) + d.Set("admin_state_up", p.AdminStateUp) + d.Set("network_id", p.NetworkID) + d.Set("mac_address", p.MACAddress) + d.Set("tenant_id", p.TenantID) + d.Set("device_owner", p.DeviceOwner) + d.Set("device_id", p.DeviceID) + + // Create a slice of all returned Fixed IPs. + // This will be in the order returned by the API, + // which is usually alpha-numeric. + var ips []string + for _, ipObject := range p.FixedIPs { + ips = append(ips, ipObject.IPAddress) + } + d.Set("all_fixed_ips", ips) + + // Set all security groups. + // This can be different from what the user specified since + // the port can have the "default" group automatically applied. + d.Set("all_security_group_ids", p.SecurityGroups) + + // Convert AllowedAddressPairs to list of map + var pairs []map[string]interface{} + for _, pairObject := range p.AllowedAddressPairs { + pair := make(map[string]interface{}) + pair["ip_address"] = pairObject.IPAddress + pair["mac_address"] = pairObject.MACAddress + pairs = append(pairs, pair) + } + d.Set("allowed_address_pairs", pairs) + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingPortV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + v := d.Get("security_group_ids").(*schema.Set) + securityGroups := resourcePortSecurityGroupsV2(v) + noSecurityGroups := d.Get("no_security_groups").(bool) + + // Check and make sure an invalid security group configuration wasn't given. 
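+	// no_security_groups requests a port with no groups attached, so combining
+	// it with an explicit security_group_ids list is contradictory.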
+ if noSecurityGroups && len(securityGroups) > 0 { + return fmt.Errorf("Cannot have both no_security_groups and security_group_ids set") + } + + var hasChange bool + var updateOpts ports.UpdateOpts + + if d.HasChange("allowed_address_pairs") { + hasChange = true + aap := resourceAllowedAddressPairsV2(d) + updateOpts.AllowedAddressPairs = &aap + } + + if d.HasChange("no_security_groups") { + if noSecurityGroups { + hasChange = true + v := []string{} + updateOpts.SecurityGroups = &v + } + } + + if d.HasChange("security_group_ids") { + hasChange = true + sgs := d.Get("security_group_ids").(*schema.Set) + securityGroups := resourcePortSecurityGroupsV2(sgs) + updateOpts.SecurityGroups = &securityGroups + } + + if d.HasChange("name") { + hasChange = true + updateOpts.Name = d.Get("name").(string) + } + + if d.HasChange("admin_state_up") { + hasChange = true + updateOpts.AdminStateUp = resourcePortAdminStateUpV2(d) + } + + if d.HasChange("device_owner") { + hasChange = true + updateOpts.DeviceOwner = d.Get("device_owner").(string) + } + + if d.HasChange("device_id") { + hasChange = true + updateOpts.DeviceID = d.Get("device_id").(string) + } + + if d.HasChange("fixed_ip") { + hasChange = true + updateOpts.FixedIPs = resourcePortFixedIpsV2(d) + } + + if hasChange { + log.Printf("[DEBUG] Updating Port %s with options: %+v", d.Id(), updateOpts) + + _, err = ports.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) + } + } + + return resourceNetworkingPortV2Read(d, meta) +} + +func resourceNetworkingPortV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForNetworkPortDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) + } + + d.SetId("") + return nil +} + +func resourcePortSecurityGroupsV2(v *schema.Set) []string { + var securityGroups []string + for _, v := range v.List() { + securityGroups = append(securityGroups, v.(string)) + } + return securityGroups +} + +func resourcePortFixedIpsV2(d *schema.ResourceData) interface{} { + rawIP := d.Get("fixed_ip").([]interface{}) + + if len(rawIP) == 0 { + return nil + } + + ip := make([]ports.IP, len(rawIP)) + for i, raw := range rawIP { + rawMap := raw.(map[string]interface{}) + ip[i] = ports.IP{ + SubnetID: rawMap["subnet_id"].(string), + IPAddress: rawMap["ip_address"].(string), + } + } + return ip +} + +func resourceAllowedAddressPairsV2(d *schema.ResourceData) []ports.AddressPair { + // ports.AddressPair + rawPairs := d.Get("allowed_address_pairs").(*schema.Set).List() + + pairs := make([]ports.AddressPair, len(rawPairs)) + for i, raw := range rawPairs { + rawMap := raw.(map[string]interface{}) + pairs[i] = ports.AddressPair{ + IPAddress: rawMap["ip_address"].(string), + MACAddress: rawMap["mac_address"].(string), + } + } + return pairs +} + +func resourcePortAdminStateUpV2(d *schema.ResourceData) *bool { + value := false + + if raw, ok := d.GetOk("admin_state_up"); ok && raw == true { + value = true + } + + return &value +} + 
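+// allowedAddressPairsHash computes the set hash for an allowed_address_pairs
+// entry from its ip_address alone. mac_address is Optional+Computed (Neutron
+// fills it in when omitted), so folding it into the hash could cause
+// spurious diffs on refresh.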
+func allowedAddressPairsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s", m["ip_address"].(string))) + + return hashcode.String(buf.String()) +} + +func waitForNetworkPortActive(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + p, err := ports.Get(networkingClient, portId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Port: %+v", p) + if p.Status == "DOWN" || p.Status == "ACTIVE" { + return p, "ACTIVE", nil + } + + return p, p.Status, nil + } +} + +func waitForNetworkPortDelete(networkingClient *gophercloud.ServiceClient, portId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Neutron Port %s", portId) + + p, err := ports.Get(networkingClient, portId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + err = ports.Delete(networkingClient, portId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Port %s", portId) + return p, "DELETED", nil + } + return p, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Port %s still active.\n", portId) + return p, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go new file mode 100644 index 00000000000..4201b9617a4 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_interface_v2.go @@ -0,0 +1,208 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func resourceNetworkingRouterInterfaceV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterInterfaceV2Create, + Read: resourceNetworkingRouterInterfaceV2Read, + Delete: resourceNetworkingRouterInterfaceV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "router_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "port_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingRouterInterfaceV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, 
config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	createOpts := routers.AddInterfaceOpts{
+		SubnetID: d.Get("subnet_id").(string),
+		PortID:   d.Get("port_id").(string),
+	}
+
+	log.Printf("[DEBUG] Create Options: %#v", createOpts)
+	n, err := routers.AddInterface(networkingClient, d.Get("router_id").(string), createOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack Neutron router interface: %s", err)
+	}
+	log.Printf("[INFO] Router interface Port ID: %s", n.PortID)
+
+	log.Printf("[DEBUG] Waiting for Router Interface (%s) to become available", n.PortID)
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"},
+		Target:     []string{"ACTIVE"},
+		Refresh:    waitForRouterInterfaceActive(networkingClient, n.PortID),
+		Timeout:    d.Timeout(schema.TimeoutCreate),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for Router Interface (%s) to become available: %s", n.PortID, err)
+	}
+
+	d.SetId(n.PortID)
+
+	return resourceNetworkingRouterInterfaceV2Read(d, meta)
+}
+
+func resourceNetworkingRouterInterfaceV2Read(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	n, err := ports.Get(networkingClient, d.Id()).Extract()
+	if err != nil {
+		if _, ok := err.(gophercloud.ErrDefault404); ok {
+			d.SetId("")
+			return nil
+		}
+
+		return fmt.Errorf("Error retrieving OpenStack Neutron Router Interface: %s", err)
+	}
+
+	log.Printf("[DEBUG] Retrieved Router Interface %s: %+v", d.Id(), n)
+
+	d.Set("router_id", n.DeviceID)
+	d.Set("port_id", n.ID)
+
+	// Set the subnet ID by looking at the port's FixedIPs.
+	// If there's more than one FixedIP, do not set the subnet,
+	// as it's not possible to confidently determine which subnet
+	// belongs to this interface. However, that situation should
+	// not happen.
+ if len(n.FixedIPs) == 1 { + d.Set("subnet_id", n.FixedIPs[0].SubnetID) + } + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingRouterInterfaceV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForRouterInterfaceDelete(networkingClient, d), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Router Interface: %s", err) + } + + d.SetId("") + return nil +} + +func waitForRouterInterfaceActive(networkingClient *gophercloud.ServiceClient, rId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + r, err := ports.Get(networkingClient, rId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Router Interface: %+v", r) + return r, r.Status, nil + } +} + +func waitForRouterInterfaceDelete(networkingClient *gophercloud.ServiceClient, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + routerId := d.Get("router_id").(string) + routerInterfaceId := d.Id() + + log.Printf("[DEBUG] Attempting to delete OpenStack Router Interface %s.", routerInterfaceId) + + removeOpts := routers.RemoveInterfaceOpts{ + SubnetID: d.Get("subnet_id").(string), + PortID: d.Get("port_id").(string), + } + + r, err := ports.Get(networkingClient, routerInterfaceId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s", routerInterfaceId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + _, err = routers.RemoveInterface(networkingClient, routerId, removeOpts).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Router Interface %s.", routerInterfaceId) + return r, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + log.Printf("[DEBUG] Router Interface %s is still in use.", routerInterfaceId) + return r, "ACTIVE", nil + } + } + + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Router Interface %s is still active.", routerInterfaceId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go new file mode 100644 index 00000000000..c05e971f6e8 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_route_v2.go @@ -0,0 +1,225 @@ +package openstack + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" +) + +func resourceNetworkingRouterRouteV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterRouteV2Create, + Read: 
resourceNetworkingRouterRouteV2Read, + Delete: resourceNetworkingRouterRouteV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "router_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "destination_cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "next_hop": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingRouterRouteV2Create(d *schema.ResourceData, meta interface{}) error { + + routerId := d.Get("router_id").(string) + osMutexKV.Lock(routerId) + defer osMutexKV.Unlock(routerId) + + var destCidr string = d.Get("destination_cidr").(string) + var nextHop string = d.Get("next_hop").(string) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + var updateOpts routers.UpdateOpts + var routeExists bool = false + + var rts []routers.Route = n.Routes + for _, r := range rts { + + if r.DestinationCIDR == destCidr && r.NextHop == nextHop { + routeExists = true + break + } + } + + if !routeExists { + + if destCidr != "" && nextHop != "" { + r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} + log.Printf( + "[INFO] Adding route %s", r) + rts = append(rts, r) + } + + updateOpts.Routes = rts + + log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) + + _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) + } + d.SetId(fmt.Sprintf("%s-route-%s-%s", routerId, destCidr, nextHop)) + + } else { + log.Printf("[DEBUG] Router %s has route already", routerId) + } + + return resourceNetworkingRouterRouteV2Read(d, meta) +} + +func resourceNetworkingRouterRouteV2Read(d *schema.ResourceData, meta interface{}) error { + + routerId := d.Get("router_id").(string) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + destCidr := d.Get("destination_cidr").(string) + nextHop := d.Get("next_hop").(string) + + routeIDParts := []string{} + if d.Id() != "" && strings.Contains(d.Id(), "-route-") { + routeIDParts = strings.Split(d.Id(), "-route-") + routeLastIDParts := strings.Split(routeIDParts[1], "-") + + if routerId == "" { + routerId = routeIDParts[0] + d.Set("router_id", routerId) + } + if destCidr == "" { + destCidr = routeLastIDParts[0] + } + if nextHop == "" { + nextHop = routeLastIDParts[1] + } + } + + n, err := routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + log.Printf("[DEBUG] Retrieved Router %s: %+v", routerId, n) + + d.Set("next_hop", "") + d.Set("destination_cidr", "") + + for _, r := range 
n.Routes { + + if r.DestinationCIDR == destCidr && r.NextHop == nextHop { + d.Set("destination_cidr", destCidr) + d.Set("next_hop", nextHop) + break + } + } + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingRouterRouteV2Delete(d *schema.ResourceData, meta interface{}) error { + + routerId := d.Get("router_id").(string) + osMutexKV.Lock(routerId) + defer osMutexKV.Unlock(routerId) + + config := meta.(*Config) + + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := routers.Get(networkingClient, routerId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + var updateOpts routers.UpdateOpts + + var destCidr string = d.Get("destination_cidr").(string) + var nextHop string = d.Get("next_hop").(string) + + var oldRts []routers.Route = n.Routes + var newRts []routers.Route + + for _, r := range oldRts { + + if r.DestinationCIDR != destCidr || r.NextHop != nextHop { + newRts = append(newRts, r) + } + } + + if len(oldRts) != len(newRts) { + r := routers.Route{DestinationCIDR: destCidr, NextHop: nextHop} + log.Printf( + "[INFO] Deleting route %s", r) + updateOpts.Routes = newRts + + log.Printf("[DEBUG] Updating Router %s with options: %+v", routerId, updateOpts) + + _, err = routers.Update(networkingClient, routerId, updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) + } + } else { + return fmt.Errorf("Route did not exist already") + } + + return nil +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go new file mode 100644 index 00000000000..1d759622fef --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_router_v2.go @@ -0,0 +1,262 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" +) + +func resourceNetworkingRouterV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterV2Create, + Read: resourceNetworkingRouterV2Read, + Update: resourceNetworkingRouterV2Update, + Delete: resourceNetworkingRouterV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + Computed: true, + }, + "distributed": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + "external_gateway": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + 
Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingRouterV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := RouterCreateOpts{ + routers.CreateOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + }, + MapValueSpecs(d), + } + + if asuRaw, ok := d.GetOk("admin_state_up"); ok { + asu := asuRaw.(bool) + createOpts.AdminStateUp = &asu + } + + if dRaw, ok := d.GetOk("distributed"); ok { + d := dRaw.(bool) + createOpts.Distributed = &d + } + + externalGateway := d.Get("external_gateway").(string) + if externalGateway != "" { + gatewayInfo := routers.GatewayInfo{ + NetworkID: externalGateway, + } + createOpts.GatewayInfo = &gatewayInfo + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := routers.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron router: %s", err) + } + log.Printf("[INFO] Router ID: %s", n.ID) + + log.Printf("[DEBUG] Waiting for OpenStack Neutron Router (%s) to become available", n.ID) + stateConf := &resource.StateChangeConf{ + Pending: []string{"BUILD", "PENDING_CREATE", "PENDING_UPDATE"}, + Target: []string{"ACTIVE"}, + Refresh: waitForRouterActive(networkingClient, n.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(n.ID) + + return resourceNetworkingRouterV2Read(d, meta) +} + +func resourceNetworkingRouterV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := routers.Get(networkingClient, d.Id()).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + log.Printf("[DEBUG] Retrieved Router %s: %+v", d.Id(), n) + + d.Set("name", n.Name) + d.Set("admin_state_up", n.AdminStateUp) + d.Set("distributed", n.Distributed) + d.Set("tenant_id", n.TenantID) + d.Set("external_gateway", n.GatewayInfo.NetworkID) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{}) error { + routerId := d.Id() + osMutexKV.Lock(routerId) + defer osMutexKV.Unlock(routerId) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts routers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("admin_state_up") { + asu := d.Get("admin_state_up").(bool) + updateOpts.AdminStateUp = &asu + } + if d.HasChange("external_gateway") { + externalGateway := d.Get("external_gateway").(string) + if externalGateway != "" { + gatewayInfo := routers.GatewayInfo{ + NetworkID: externalGateway, + } + updateOpts.GatewayInfo = 
&gatewayInfo
+		}
+	}
+
+	log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = routers.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err)
+	}
+
+	return resourceNetworkingRouterV2Read(d, meta)
+}
+
+func resourceNetworkingRouterV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(GetRegion(d, config))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"ACTIVE"},
+		Target:     []string{"DELETED"},
+		Refresh:    waitForRouterDelete(networkingClient, d.Id()),
+		Timeout:    d.Timeout(schema.TimeoutDelete),
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error deleting OpenStack Neutron Router: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func waitForRouterActive(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		r, err := routers.Get(networkingClient, routerId).Extract()
+		if err != nil {
+			// r is nil when Extract fails, so don't dereference it here.
+			return nil, "", err
+		}
+
+		log.Printf("[DEBUG] OpenStack Neutron Router: %+v", r)
+		return r, r.Status, nil
+	}
+}
+
+func waitForRouterDelete(networkingClient *gophercloud.ServiceClient, routerId string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		log.Printf("[DEBUG] Attempting to delete OpenStack Router %s.\n", routerId)
+
+		r, err := routers.Get(networkingClient, routerId).Extract()
+		if err != nil {
+			if _, ok := err.(gophercloud.ErrDefault404); ok {
+				log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId)
+				return r, "DELETED", nil
+			}
+			return r, "ACTIVE", err
+		}
+
+		err = routers.Delete(networkingClient, routerId).ExtractErr()
+		if err != nil {
+			if _, ok := err.(gophercloud.ErrDefault404); ok {
+				log.Printf("[DEBUG] Successfully deleted OpenStack Router %s", routerId)
+				return r, "DELETED", nil
+			}
+			return r, "ACTIVE", err
+		}
+
+		log.Printf("[DEBUG] OpenStack Router %s still active.\n", routerId)
+		return r, "ACTIVE", nil
+	}
+}
diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go
new file mode 100644
index 00000000000..33e68eb2efa
--- /dev/null
+++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_rule_v2.go
@@ -0,0 +1,316 @@
+package openstack
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules"
+)
+
+func resourceNetworkingSecGroupRuleV2() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceNetworkingSecGroupRuleV2Create,
+		Read:   resourceNetworkingSecGroupRuleV2Read,
+		Delete: resourceNetworkingSecGroupRuleV2Delete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Delete: schema.DefaultTimeout(10 * time.Minute),
+		},
+
+		Schema: 
map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "direction": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ethertype": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port_range_min": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + "port_range_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "remote_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "remote_ip_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + StateFunc: func(v interface{}) string { + return strings.ToLower(v.(string)) + }, + }, + "security_group_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + } +} + +func resourceNetworkingSecGroupRuleV2Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + portRangeMin := d.Get("port_range_min").(int) + portRangeMax := d.Get("port_range_max").(int) + protocol := d.Get("protocol").(string) + + if protocol == "" { + if portRangeMin != 0 || portRangeMax != 0 { + return fmt.Errorf("A protocol must be specified when using port_range_min and port_range_max") + } + } + + opts := rules.CreateOpts{ + SecGroupID: d.Get("security_group_id").(string), + PortRangeMin: d.Get("port_range_min").(int), + PortRangeMax: d.Get("port_range_max").(int), + RemoteGroupID: d.Get("remote_group_id").(string), + RemoteIPPrefix: d.Get("remote_ip_prefix").(string), + TenantID: d.Get("tenant_id").(string), + } + + if v, ok := d.GetOk("direction"); ok { + direction := resourceNetworkingSecGroupRuleV2DetermineDirection(v.(string)) + opts.Direction = direction + } + + if v, ok := d.GetOk("ethertype"); ok { + ethertype := resourceNetworkingSecGroupRuleV2DetermineEtherType(v.(string)) + opts.EtherType = ethertype + } + + if v, ok := d.GetOk("protocol"); ok { + protocol := resourceNetworkingSecGroupRuleV2DetermineProtocol(v.(string)) + opts.Protocol = protocol + } + + log.Printf("[DEBUG] Create OpenStack Neutron security group: %#v", opts) + + security_group_rule, err := rules.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group Rule created: %#v", security_group_rule) + + d.SetId(security_group_rule.ID) + + return resourceNetworkingSecGroupRuleV2Read(d, meta) +} + +func resourceNetworkingSecGroupRuleV2Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about security group rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + security_group_rule, err := rules.Get(networkingClient, d.Id()).Extract() + + if err != nil { + return CheckDeleted(d, err, 
"OpenStack Security Group Rule") + } + + d.Set("direction", security_group_rule.Direction) + d.Set("ethertype", security_group_rule.EtherType) + d.Set("protocol", security_group_rule.Protocol) + d.Set("port_range_min", security_group_rule.PortRangeMin) + d.Set("port_range_max", security_group_rule.PortRangeMax) + d.Set("remote_group_id", security_group_rule.RemoteGroupID) + d.Set("remote_ip_prefix", security_group_rule.RemoteIPPrefix) + d.Set("security_group_id", security_group_rule.SecGroupID) + d.Set("tenant_id", security_group_rule.TenantID) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingSecGroupRuleV2Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy security group rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForSecGroupRuleDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Security Group Rule: %s", err) + } + + d.SetId("") + return err +} + +func resourceNetworkingSecGroupRuleV2DetermineDirection(v string) rules.RuleDirection { + var direction rules.RuleDirection + switch v { + case "ingress": + direction = rules.DirIngress + case "egress": + direction = rules.DirEgress + } + + return direction +} + +func resourceNetworkingSecGroupRuleV2DetermineEtherType(v string) rules.RuleEtherType { + var etherType rules.RuleEtherType + switch v { + case "IPv4": + etherType = rules.EtherType4 + case "IPv6": + etherType = rules.EtherType6 + } + + return etherType +} + +func resourceNetworkingSecGroupRuleV2DetermineProtocol(v string) rules.RuleProtocol { + var protocol rules.RuleProtocol + + // Check and see if the requested protocol matched a list of known protocol names. + switch v { + case "tcp": + protocol = rules.ProtocolTCP + case "udp": + protocol = rules.ProtocolUDP + case "icmp": + protocol = rules.ProtocolICMP + case "ah": + protocol = rules.ProtocolAH + case "dccp": + protocol = rules.ProtocolDCCP + case "egp": + protocol = rules.ProtocolEGP + case "esp": + protocol = rules.ProtocolESP + case "gre": + protocol = rules.ProtocolGRE + case "igmp": + protocol = rules.ProtocolIGMP + case "ipv6-encap": + protocol = rules.ProtocolIPv6Encap + case "ipv6-frag": + protocol = rules.ProtocolIPv6Frag + case "ipv6-icmp": + protocol = rules.ProtocolIPv6ICMP + case "ipv6-nonxt": + protocol = rules.ProtocolIPv6NoNxt + case "ipv6-opts": + protocol = rules.ProtocolIPv6Opts + case "ipv6-route": + protocol = rules.ProtocolIPv6Route + case "ospf": + protocol = rules.ProtocolOSPF + case "pgm": + protocol = rules.ProtocolPGM + case "rsvp": + protocol = rules.ProtocolRSVP + case "sctp": + protocol = rules.ProtocolSCTP + case "udplite": + protocol = rules.ProtocolUDPLite + case "vrrp": + protocol = rules.ProtocolVRRP + } + + // If the protocol wasn't matched above, see if it's an integer. 
+ if protocol == "" { + _, err := strconv.Atoi(v) + if err == nil { + protocol = rules.RuleProtocol(v) + } + } + + return protocol +} + +func waitForSecGroupRuleDelete(networkingClient *gophercloud.ServiceClient, secGroupRuleId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Security Group Rule %s.\n", secGroupRuleId) + + r, err := rules.Get(networkingClient, secGroupRuleId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + err = rules.Delete(networkingClient, secGroupRuleId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group Rule %s", secGroupRuleId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group Rule %s still active.\n", secGroupRuleId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go new file mode 100644 index 00000000000..1911c4b7055 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_secgroup_v2.go @@ -0,0 +1,215 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" +) + +func resourceNetworkingSecGroupV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingSecGroupV2Create, + Read: resourceNetworkingSecGroupV2Read, + Update: resourceNetworkingSecGroupV2Update, + Delete: resourceNetworkingSecGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "delete_default_rules": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := groups.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + TenantID: d.Get("tenant_id").(string), + } + + log.Printf("[DEBUG] Create OpenStack Neutron Security Group: %#v", opts) + + security_group, err 
:= groups.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + // Delete the default security group rules if it has been requested. + deleteDefaultRules := d.Get("delete_default_rules").(bool) + if deleteDefaultRules { + security_group, err := groups.Get(networkingClient, security_group.ID).Extract() + if err != nil { + return err + } + for _, rule := range security_group.Rules { + if err := rules.Delete(networkingClient, rule.ID).ExtractErr(); err != nil { + return fmt.Errorf( + "There was a problem deleting a default security group rule: %s", err) + } + } + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group created: %#v", security_group) + + d.SetId(security_group.ID) + + return resourceNetworkingSecGroupV2Read(d, meta) +} + +func resourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about security group: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + security_group, err := groups.Get(networkingClient, d.Id()).Extract() + + if err != nil { + return CheckDeleted(d, err, "OpenStack Neutron Security group") + } + + d.Set("description", security_group.Description) + d.Set("tenant_id", security_group.TenantID) + d.Set("name", security_group.Name) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var update bool + var updateOpts groups.UpdateOpts + + if d.HasChange("name") { + update = true + updateOpts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + update = true + updateOpts.Description = d.Get("description").(string) + } + + if update { + log.Printf("[DEBUG] Updating SecGroup %s with options: %#v", d.Id(), updateOpts) + _, err = groups.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack SecGroup: %s", err) + } + } + + return resourceNetworkingSecGroupV2Read(d, meta) +} + +func resourceNetworkingSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy security group: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForSecGroupDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Security Group: %s", err) + } + + d.SetId("") + return err +} + +func waitForSecGroupDelete(networkingClient *gophercloud.ServiceClient, secGroupId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Security Group %s.\n", secGroupId) + + r, err := groups.Get(networkingClient, secGroupId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); 
ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) + return r, "DELETED", nil + } + return r, "ACTIVE", err + } + + err = groups.Delete(networkingClient, secGroupId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Neutron Security Group %s", secGroupId) + return r, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return r, "ACTIVE", nil + } + } + return r, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Neutron Security Group %s still active.\n", secGroupId) + return r, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go new file mode 100644 index 00000000000..266d2de2eb5 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_networking_subnet_v2.go @@ -0,0 +1,423 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +func resourceNetworkingSubnetV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingSubnetV2Create, + Read: resourceNetworkingSubnetV2Read, + Update: resourceNetworkingSubnetV2Update, + Delete: resourceNetworkingSubnetV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "network_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "allocation_pools": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "end": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "gateway_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + }, + "no_gateway": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + }, + "ip_version": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + ForceNew: true, + }, + "enable_dhcp": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: false, + Default: true, + }, + "dns_nameservers": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "host_routes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "next_hop": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := SubnetCreateOpts{ + subnets.CreateOpts{ + NetworkID: d.Get("network_id").(string), + CIDR: d.Get("cidr").(string), + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + AllocationPools: resourceSubnetAllocationPoolsV2(d), + DNSNameservers: resourceSubnetDNSNameserversV2(d), + HostRoutes: resourceSubnetHostRoutesV2(d), + EnableDHCP: nil, + }, + MapValueSpecs(d), + } + + noGateway := d.Get("no_gateway").(bool) + gatewayIP := d.Get("gateway_ip").(string) + + if gatewayIP != "" && noGateway { + return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") + } + + if gatewayIP != "" { + createOpts.GatewayIP = &gatewayIP + } + + if noGateway { + disableGateway := "" + createOpts.GatewayIP = &disableGateway + } + + enableDHCP := d.Get("enable_dhcp").(bool) + createOpts.EnableDHCP = &enableDHCP + + if v, ok := d.GetOk("ip_version"); ok { + ipVersion := resourceNetworkingSubnetV2DetermineIPVersion(v.(int)) + createOpts.IPVersion = ipVersion + } + + s, err := subnets.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err) + } + + log.Printf("[DEBUG] Waiting for Subnet (%s) to become available", s.ID) + stateConf := &resource.StateChangeConf{ + Target: []string{"ACTIVE"}, + Refresh: waitForSubnetActive(networkingClient, s.ID), + Timeout: d.Timeout(schema.TimeoutCreate), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for OpenStack Neutron Subnet (%s) to become available: %s", s.ID, err) + } + + d.SetId(s.ID) + + log.Printf("[DEBUG] Created Subnet %s: %#v", s.ID, s) + return resourceNetworkingSubnetV2Read(d, meta) +} + +func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + s, err := subnets.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "subnet") + } + + log.Printf("[DEBUG] Retrieved Subnet %s: %#v", d.Id(), s) + + d.Set("network_id", s.NetworkID) + d.Set("cidr", s.CIDR) + d.Set("ip_version", s.IPVersion) + d.Set("name", s.Name) + d.Set("tenant_id", s.TenantID) + d.Set("gateway_ip", s.GatewayIP) + d.Set("dns_nameservers", s.DNSNameservers) + d.Set("host_routes", s.HostRoutes) + d.Set("enable_dhcp", s.EnableDHCP) + + // Set the allocation_pools + var allocationPools []map[string]interface{} + for _, v := range s.AllocationPools { + pool := make(map[string]interface{}) + pool["start"] = v.Start + pool["end"] = v.End + + allocationPools = append(allocationPools, pool) + } + d.Set("allocation_pools", allocationPools) + + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + // Check if both gateway_ip and no_gateway are set + if _, ok := d.GetOk("gateway_ip"); ok { + noGateway := d.Get("no_gateway").(bool) + if noGateway { + return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.") + } + } + + var updateOpts subnets.UpdateOpts + + noGateway := d.Get("no_gateway").(bool) + gatewayIP := d.Get("gateway_ip").(string) + + if gatewayIP != "" && noGateway { + return fmt.Errorf("Both gateway_ip and no_gateway cannot be set") + } + + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + + if d.HasChange("gateway_ip") { + updateOpts.GatewayIP = nil + if v, ok := d.GetOk("gateway_ip"); ok { + gatewayIP := v.(string) + updateOpts.GatewayIP = &gatewayIP + } + } + + if d.HasChange("no_gateway") { + if d.Get("no_gateway").(bool) { + gatewayIP := "" + updateOpts.GatewayIP = &gatewayIP + } + } + + if d.HasChange("dns_nameservers") { + updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d) + } + + if d.HasChange("host_routes") { + updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d) + } + + if d.HasChange("enable_dhcp") { + v := d.Get("enable_dhcp").(bool) + updateOpts.EnableDHCP = &v + } + + if d.HasChange("allocation_pools") { + updateOpts.AllocationPools = resourceSubnetAllocationPoolsV2(d) + } + + log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts) + + _, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err) + } + + return resourceNetworkingSubnetV2Read(d, meta) +} + +func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: []string{"DELETED"}, + Refresh: waitForSubnetDelete(networkingClient, d.Id()), + Timeout: d.Timeout(schema.TimeoutDelete), + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err) + } + + d.SetId("") + return nil +} + +func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool { + rawAPs := d.Get("allocation_pools").([]interface{}) + aps := make([]subnets.AllocationPool, len(rawAPs)) + for i, raw := range rawAPs { + rawMap := raw.(map[string]interface{}) + aps[i] = subnets.AllocationPool{ + Start: rawMap["start"].(string), + End: rawMap["end"].(string), + } + } + return aps +} + +func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string { + rawDNSN := d.Get("dns_nameservers").(*schema.Set) + dnsn := make([]string, rawDNSN.Len()) + for i, raw := range rawDNSN.List() { + dnsn[i] = raw.(string) + } + return dnsn +} + +func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute { + rawHR := d.Get("host_routes").([]interface{}) + hr := make([]subnets.HostRoute, len(rawHR)) + for i, raw := range rawHR { + rawMap := raw.(map[string]interface{}) + hr[i] = subnets.HostRoute{ + DestinationCIDR: rawMap["destination_cidr"].(string), + NextHop: rawMap["next_hop"].(string), + } + } + return hr +} + +func 
resourceNetworkingSubnetV2DetermineIPVersion(v int) gophercloud.IPVersion { + var ipVersion gophercloud.IPVersion + switch v { + case 4: + ipVersion = gophercloud.IPv4 + case 6: + ipVersion = gophercloud.IPv6 + } + + return ipVersion +} + +func waitForSubnetActive(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + s, err := subnets.Get(networkingClient, subnetId).Extract() + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] OpenStack Neutron Subnet: %+v", s) + return s, "ACTIVE", nil + } +} + +func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + log.Printf("[DEBUG] Attempting to delete OpenStack Subnet %s.\n", subnetId) + + s, err := subnets.Get(networkingClient, subnetId).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) + return s, "DELETED", nil + } + return s, "ACTIVE", err + } + + err = subnets.Delete(networkingClient, subnetId).ExtractErr() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId) + return s, "DELETED", nil + } + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return s, "ACTIVE", nil + } + } + return s, "ACTIVE", err + } + + log.Printf("[DEBUG] OpenStack Subnet %s still active.\n", subnetId) + return s, "ACTIVE", nil + } +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go new file mode 100644 index 00000000000..e4cbe5be859 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/resource_openstack_objectstorage_container_v1.go @@ -0,0 +1,151 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceObjectStorageContainerV1() *schema.Resource { + return &schema.Resource{ + Create: resourceObjectStorageContainerV1Create, + Read: resourceObjectStorageContainerV1Read, + Update: resourceObjectStorageContainerV1Update, + Delete: resourceObjectStorageContainerV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "container_read": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_sync_to": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_sync_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_write": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceObjectStorageContainerV1Create(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + cn := d.Get("name").(string) + + createOpts := &containers.CreateOpts{ + ContainerRead: d.Get("container_read").(string), + ContainerSyncTo: d.Get("container_sync_to").(string), + ContainerSyncKey: d.Get("container_sync_key").(string), + ContainerWrite: d.Get("container_write").(string), + ContentType: d.Get("content_type").(string), + Metadata: resourceContainerMetadataV2(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + _, err = containers.Create(objectStorageClient, cn, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack container: %s", err) + } + log.Printf("[INFO] Container ID: %s", cn) + + // Store the ID now + d.SetId(cn) + + return resourceObjectStorageContainerV1Read(d, meta) +} + +func resourceObjectStorageContainerV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + d.Set("region", GetRegion(d, config)) + + return nil +} + +func resourceObjectStorageContainerV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + updateOpts := containers.UpdateOpts{ + ContainerRead: d.Get("container_read").(string), + ContainerSyncTo: d.Get("container_sync_to").(string), + ContainerSyncKey: d.Get("container_sync_key").(string), + ContainerWrite: d.Get("container_write").(string), + ContentType: d.Get("content_type").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceContainerMetadataV2(d) + } + + _, err = containers.Update(objectStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack container: %s", err) + } + + return resourceObjectStorageContainerV1Read(d, meta) +} + +func resourceObjectStorageContainerV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(GetRegion(d, config)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + _, err = containers.Delete(objectStorageClient, d.Id()).Extract() + if err != nil { + return fmt.Errorf("Error deleting OpenStack container: %s", err) + } + + d.SetId("") + return nil +} + +func resourceContainerMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go new file mode 100644 index 00000000000..f69da63570c --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/types.go @@ -0,0 +1,353 @@ +package openstack + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + 
"github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +// LogRoundTripper satisfies the http.RoundTripper interface and is used to +// customize the default http client RoundTripper to allow for logging. +type LogRoundTripper struct { + Rt http.RoundTripper + OsDebug bool +} + +// RoundTrip performs a round-trip HTTP request and logs relevant information about it. +func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + defer func() { + if request.Body != nil { + request.Body.Close() + } + }() + + // for future reference, this is how to access the Transport struct: + //tlsconfig := lrt.Rt.(*http.Transport).TLSClientConfig + + var err error + + if lrt.OsDebug { + log.Printf("[DEBUG] OpenStack Request URL: %s %s", request.Method, request.URL) + log.Printf("[DEBUG] Openstack Request Headers:\n%s", FormatHeaders(request.Header, "\n")) + + if request.Body != nil { + request.Body, err = lrt.logRequest(request.Body, request.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + } + + response, err := lrt.Rt.RoundTrip(request) + if response == nil { + return nil, err + } + + if lrt.OsDebug { + log.Printf("[DEBUG] Openstack Response Code: %d", response.StatusCode) + log.Printf("[DEBUG] Openstack Response Headers:\n%s", FormatHeaders(response.Header, "\n")) + + response.Body, err = lrt.logResponse(response.Body, response.Header.Get("Content-Type")) + } + + return response, err +} + +// logRequest will log the HTTP Request details. +// If the body is JSON, it will attempt to be pretty-formatted. +func (lrt *LogRoundTripper) logRequest(original io.ReadCloser, contentType string) (io.ReadCloser, error) { + defer original.Close() + + var bs bytes.Buffer + _, err := io.Copy(&bs, original) + if err != nil { + return nil, err + } + + // Handle request contentType + if strings.HasPrefix(contentType, "application/json") { + debugInfo := lrt.formatJSON(bs.Bytes()) + log.Printf("[DEBUG] OpenStack Request Body: %s", debugInfo) + } else { + log.Printf("[DEBUG] OpenStack Request Body: %s", bs.String()) + } + + return ioutil.NopCloser(strings.NewReader(bs.String())), nil +} + +// logResponse will log the HTTP Response details. +// If the body is JSON, it will attempt to be pretty-formatted. 
+func (lrt *LogRoundTripper) logResponse(original io.ReadCloser, contentType string) (io.ReadCloser, error) { + if strings.HasPrefix(contentType, "application/json") { + var bs bytes.Buffer + defer original.Close() + _, err := io.Copy(&bs, original) + if err != nil { + return nil, err + } + debugInfo := lrt.formatJSON(bs.Bytes()) + if debugInfo != "" { + log.Printf("[DEBUG] OpenStack Response Body: %s", debugInfo) + } + return ioutil.NopCloser(strings.NewReader(bs.String())), nil + } + + log.Printf("[DEBUG] Not logging because OpenStack response body isn't JSON") + return original, nil +} + +// formatJSON will try to pretty-format a JSON body. +// It will also mask known fields which contain sensitive information. +func (lrt *LogRoundTripper) formatJSON(raw []byte) string { + var data map[string]interface{} + + err := json.Unmarshal(raw, &data) + if err != nil { + log.Printf("[DEBUG] Unable to parse OpenStack JSON: %s", err) + return string(raw) + } + + // Mask known password fields + if v, ok := data["auth"].(map[string]interface{}); ok { + if v, ok := v["identity"].(map[string]interface{}); ok { + if v, ok := v["password"].(map[string]interface{}); ok { + if v, ok := v["user"].(map[string]interface{}); ok { + v["password"] = "***" + } + } + } + } + + // Ignore the catalog + if v, ok := data["token"].(map[string]interface{}); ok { + if _, ok := v["catalog"]; ok { + return "" + } + } + + pretty, err := json.MarshalIndent(data, "", " ") + if err != nil { + log.Printf("[DEBUG] Unable to re-marshal OpenStack JSON: %s", err) + return string(raw) + } + + return string(pretty) +} + +// Firewall is an OpenStack firewall. +type Firewall struct { + firewalls.Firewall + routerinsertion.FirewallExt +} + +// FirewallCreateOpts represents the attributes used when creating a new firewall. +type FirewallCreateOpts struct { + firewalls.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToFirewallCreateMap casts a CreateOptsExt struct to a map. +// It overrides firewalls.ToFirewallCreateMap to add the ValueSpecs field. +func (opts FirewallCreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "firewall") +} + +//FirewallUpdateOpts +type FirewallUpdateOpts struct { + firewalls.UpdateOptsBuilder +} + +func (opts FirewallUpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "firewall") +} + +// FloatingIPCreateOpts represents the attributes used when creating a new floating ip. +type FloatingIPCreateOpts struct { + floatingips.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToFloatingIPCreateMap casts a CreateOpts struct to a map. +// It overrides floatingips.ToFloatingIPCreateMap to add the ValueSpecs field. +func (opts FloatingIPCreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "floatingip") +} + +// KeyPairCreateOpts represents the attributes used when creating a new keypair. +type KeyPairCreateOpts struct { + keypairs.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToKeyPairCreateMap casts a CreateOpts struct to a map. +// It overrides keypairs.ToKeyPairCreateMap to add the ValueSpecs field. +func (opts KeyPairCreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "keypair") +} + +// NetworkCreateOpts represents the attributes used when creating a new network. 
+type NetworkCreateOpts struct { + networks.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +// It overrides networks.ToNetworkCreateMap to add the ValueSpecs field. +func (opts NetworkCreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "network") +} + +// PolicyCreateOpts represents the attributes used when creating a new firewall policy. +type PolicyCreateOpts struct { + policies.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToPolicyCreateMap casts a CreateOpts struct to a map. +// It overrides policies.ToFirewallPolicyCreateMap to add the ValueSpecs field. +func (opts PolicyCreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "firewall_policy") +} + +// PortCreateOpts represents the attributes used when creating a new port. +type PortCreateOpts struct { + ports.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. +// It overrides ports.ToPortCreateMap to add the ValueSpecs field. +func (opts PortCreateOpts) ToPortCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "port") +} + +// RecordSetCreateOpts represents the attributes used when creating a new DNS record set. +type RecordSetCreateOpts struct { + recordsets.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToRecordSetCreateMap casts a CreateOpts struct to a map. +// It overrides recordsets.ToRecordSetCreateMap to add the ValueSpecs field. +func (opts RecordSetCreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { + b, err := BuildRequest(opts, "") + if err != nil { + return nil, err + } + + if m, ok := b[""].(map[string]interface{}); ok { + return m, nil + } + + return nil, fmt.Errorf("Expected map but got %T", b[""]) +} + +// RouterCreateOpts represents the attributes used when creating a new router. +type RouterCreateOpts struct { + routers.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToRouterCreateMap casts a CreateOpts struct to a map. +// It overrides routers.ToRouterCreateMap to add the ValueSpecs field. +func (opts RouterCreateOpts) ToRouterCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "router") +} + +// RuleCreateOpts represents the attributes used when creating a new firewall rule. +type RuleCreateOpts struct { + rules.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToRuleCreateMap casts a CreateOpts struct to a map. +// It overrides rules.ToRuleCreateMap to add the ValueSpecs field. +func (opts RuleCreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { + b, err := BuildRequest(opts, "firewall_rule") + if err != nil { + return nil, err + } + + if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" { + m["protocol"] = nil + } + + return b, nil +} + +// ServerGroupCreateOpts represents the attributes used when creating a new router. +type ServerGroupCreateOpts struct { + servergroups.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToServerGroupCreateMap casts a CreateOpts struct to a map. +// It overrides routers.ToServerGroupCreateMap to add the ValueSpecs field. 
+func (opts ServerGroupCreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { + return BuildRequest(opts, "server_group") +} + +// SubnetCreateOpts represents the attributes used when creating a new subnet. +type SubnetCreateOpts struct { + subnets.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToSubnetCreateMap casts a CreateOpts struct to a map. +// It overrides subnets.ToSubnetCreateMap to add the ValueSpecs field. +func (opts SubnetCreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { + b, err := BuildRequest(opts, "subnet") + if err != nil { + return nil, err + } + + if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { + m["gateway_ip"] = nil + } + + return b, nil +} + +// ZoneCreateOpts represents the attributes used when creating a new DNS zone. +type ZoneCreateOpts struct { + zones.CreateOpts + ValueSpecs map[string]string `json:"value_specs,omitempty"` +} + +// ToZoneCreateMap casts a CreateOpts struct to a map. +// It overrides zones.ToZoneCreateMap to add the ValueSpecs field. +func (opts ZoneCreateOpts) ToZoneCreateMap() (map[string]interface{}, error) { + b, err := BuildRequest(opts, "") + if err != nil { + return nil, err + } + + if m, ok := b[""].(map[string]interface{}); ok { + if opts.TTL > 0 { + m["ttl"] = opts.TTL + } + + return m, nil + } + + return nil, fmt.Errorf("Expected map but got %T", b[""]) +} diff --git a/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go new file mode 100644 index 00000000000..8b88cacb6a7 --- /dev/null +++ b/vendor/github.com/terraform-providers/terraform-provider-openstack/openstack/util.go @@ -0,0 +1,114 @@ +package openstack + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/Unknwon/com" + "github.com/gophercloud/gophercloud" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +// BuildRequest takes an opts struct and builds a request body for +// Gophercloud to execute +func BuildRequest(opts interface{}, parent string) (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + b = AddValueSpecs(b) + + return map[string]interface{}{parent: b}, nil +} + +// CheckDeleted checks the error to see if it's a 404 (Not Found) and, if so, +// sets the resource ID to the empty string instead of throwing an error. +func CheckDeleted(d *schema.ResourceData, err error, msg string) error { + if _, ok := err.(gophercloud.ErrDefault404); ok { + d.SetId("") + return nil + } + + return fmt.Errorf("%s: %s", msg, err) +} + +// GetRegion returns the region that was specified in the resource. If a +// region was not set, the provider-level region is checked. The provider-level +// region can either be set by the region argument or by OS_REGION_NAME. +func GetRegion(d *schema.ResourceData, config *Config) string { + if v, ok := d.GetOk("region"); ok { + return v.(string) + } + + return config.Region +} + 
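// NOTE (editorial, illustrative only; not part of the vendored file): CheckDeleted
// and GetRegion above define the shared read-path contract used by every resource
// in this package: a 404 clears the resource ID so Terraform plans a re-create
// instead of failing the refresh, and the region always falls back to the
// provider-level setting. A hypothetical Read function (the resource and its
// "things" API are invented for illustration) wires them together like this:
//
//	func resourceExampleV2Read(d *schema.ResourceData, meta interface{}) error {
//		config := meta.(*Config)
//		client, err := config.networkingV2Client(GetRegion(d, config))
//		if err != nil {
//			return fmt.Errorf("Error creating OpenStack networking client: %s", err)
//		}
//		thing, err := things.Get(client, d.Id()).Extract()
//		if err != nil {
//			// On a 404, CheckDeleted calls d.SetId("") and returns nil;
//			// any other error is wrapped with the message given below.
//			return CheckDeleted(d, err, "example thing")
//		}
//		d.Set("name", thing.Name)
//		d.Set("region", GetRegion(d, config))
//		return nil
//	}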
+// AddValueSpecs expands the 'value_specs' object and removes 'value_specs' +// from the request body. +func AddValueSpecs(body map[string]interface{}) map[string]interface{} { + if body["value_specs"] != nil { + for k, v := range body["value_specs"].(map[string]interface{}) { + body[k] = v + } + delete(body, "value_specs") + } + + return body +} + +// MapValueSpecs converts ResourceData into a map +func MapValueSpecs(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("value_specs").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +// List of headers that need to be redacted +var REDACT_HEADERS = []string{"x-auth-token", "x-auth-key", "x-service-token", + "x-storage-token", "x-account-meta-temp-url-key", "x-account-meta-temp-url-key-2", + "x-container-meta-temp-url-key", "x-container-meta-temp-url-key-2", "set-cookie", + "x-subject-token"} + +// RedactHeaders processes a headers object, returning a redacted list +func RedactHeaders(headers http.Header) (processedHeaders []string) { + for name, header := range headers { + for _, v := range header { + if com.IsSliceContainsStr(REDACT_HEADERS, name) { + processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, "***")) + } else { + processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, v)) + } + } + } + return +} + +// FormatHeaders processes a headers object plus a delimiter, returning a string +func FormatHeaders(headers http.Header, separator string) string { + redactedHeaders := RedactHeaders(headers) + sort.Strings(redactedHeaders) + + return strings.Join(redactedHeaders, separator) +} + +func checkForRetryableError(err error) *resource.RetryError { + switch errCode := err.(type) { + case gophercloud.ErrDefault500: + return resource.RetryableError(err) + case gophercloud.ErrUnexpectedResponseCode: + switch errCode.Actual { + case 409, 503: + return resource.RetryableError(err) + default: + return resource.NonRetryableError(err) + } + default: + return resource.NonRetryableError(err) + } +} diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE new file mode 100644 index 00000000000..95a0f0541cd --- /dev/null +++ b/vendor/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
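Before moving into the vendored ugorji/go codec files, it is worth making the value_specs mechanism from the OpenStack files above concrete, since every *CreateOpts wrapper in types.go exists only to feed it. The following standalone sketch is illustrative only and not part of the diff; it inlines the AddValueSpecs merge on a plain map (as a lowercase copy, addValueSpecs, so it runs without gophercloud) and shows how a value_specs entry is hoisted into the top level of the request body before BuildRequest wraps the body under its parent key:

    package main

    import "fmt"

    // addValueSpecs mirrors AddValueSpecs from util.go above: entries of the
    // "value_specs" sub-map are promoted to the top level of the body, and
    // the sub-map itself is removed.
    func addValueSpecs(body map[string]interface{}) map[string]interface{} {
        if body["value_specs"] != nil {
            for k, v := range body["value_specs"].(map[string]interface{}) {
                body[k] = v
            }
            delete(body, "value_specs")
        }
        return body
    }

    func main() {
        body := addValueSpecs(map[string]interface{}{
            "name": "example-network",
            "value_specs": map[string]interface{}{
                "provider:network_type": "vxlan",
            },
        })
        // BuildRequest would then wrap this under its parent key:
        fmt.Println(map[string]interface{}{"network": body})
        // map[network:map[name:example-network provider:network_type:vxlan]]
    }

This is why NetworkCreateOpts and the other wrappers only need to embed the upstream CreateOpts and add a ValueSpecs field: the merge itself happens generically in AddValueSpecs.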
diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go new file mode 100644 index 00000000000..78b32055f30 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -0,0 +1,206 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for +binc, msgpack, cbor, json + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package will carefully use 'unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 +go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from +go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Good code coverage ( > 70% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. + 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. 
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +Custom Encoding and Decoding + +This package maintains symmetry in the encoding and decoding halfs. +We determine how to encode or decode by walking this decision tree + + - is type a codec.Selfer? + - is there an extension registered for the type? + - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? + - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symetry +(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), +then that type doesn't satisfy the check and we will continue walking down the +decision tree. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. + +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +Running Benchmarks + +Please see http://github.com/ugorji/go-codec-bench . + +*/ +package codec + diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md new file mode 100644 index 00000000000..95c7d617693 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -0,0 +1,187 @@ +# Codec + +High Performance, Feature-Rich Idiomatic Go codec/encoding library for +binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package will carefully use 'unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 +go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from +go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. + +Online documentation: http://godoc.org/github.com/ugorji/go/codec +Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Good code coverage ( > 70% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. + 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - Multiple conversions: + Package coerces types where appropriate + e.g. decode an int in the stream into a float, etc. 
+ - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Encode a struct as an array, and decode struct from an array in the data stream + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. + User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +## Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +## Custom Encoding and Decoding + +This package maintains symmetry in the encoding and decoding halfs. +We determine how to encode or decode by walking this decision tree + + - is type a codec.Selfer? + - is there an extension registered for the type? + - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? + - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symetry +(e.g. 
it implements UnmarshalJSON() but not MarshalJSON() ), +then that type doesn't satisfy the check and we will continue walking down the +decision tree. + +## RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +## Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +## Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +## Running Benchmarks + +Please see http://github.com/ugorji/go-codec-bench . + diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go new file mode 100644 index 00000000000..be5b7d33804 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -0,0 +1,946 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. 
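// NOTE (editorial, illustrative only; not part of the vendored file): every
// encoded binc value begins with a descriptor byte whose high nibble is the
// value descriptor (vd) and whose low nibble is a value-specific field (vs);
// the const block below enumerates the vd slots. The packing used throughout
// this file is plain nibble arithmetic:
//
//	bd := bincVdSpecial<<4 | bincSpTrue // 0x02: descriptor byte for boolean true
//	vd := bd >> 4                       // high nibble: bincVdSpecial
//	vs := bd & 0x0f                     // low nibble: bincSpTrue
//
// This is exactly how EncodeNil and EncodeBool below emit single-byte values,
// and why the decoder struct (bincDecDriver) tracks bd, vd, and vs separately.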
+ +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + e *Encoder + w encWriter + m map[string]uint16 // symbols + b [scratchByteArrayLen]byte + s uint16 // symbols sequencer + // encNoSeparator + encDriverNoopContainerWriter +} + +func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) { + if rt == timeTypId { + var bs []byte + switch x := v.(type) { + case time.Time: + bs = encodeTime(x) + case *time.Time: + bs = encodeTime(*x) + default: + e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v) + } + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) EncodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) EncodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:8], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:8]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) EncodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd|0x0, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.encIntegerPrune(bd, pos, v, 4) + } else { + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) EncodeExt(rv 
interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) WriteArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) WriteMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). + + l := len(v) + if l == 0 { + e.encBytesLen(c_UTF8, 0) + return + } else if l == 1 { + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + } else { + e.s++ + ui = e.s + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + // lenprec = 0 + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { + e.w.writen1(bd | 0x03) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + +type bincDecDriver struct { + d *Decoder + h *BincHandle + r decReader + br bool // bytes reader + bdRead bool + bd byte + vd byte + vs byte + // 
noStreamingCodec + // decNoSeparator + b [scratchByteArrayLen]byte + + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. + s []bincDecSymbol + decDriverNoopContainerReader +} + +func (d *bincDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *bincDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) { + if !d.bdRead { + d.readNextBd() + } + if rt == timeTypId { + if d.vd != bincVdTimestamp { + d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + return + } + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) + return + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + } else { + d.d.errorf("only float32 and float64 are supported. 
d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + return + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 3: + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:8]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:8])) + case 7: + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) + default: + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return + } + return +} + +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + //i = 0 + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) + return + } + } else { + d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return + } + return +} + +func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { + ui, neg := d.decCheckInteger() + i, overflow := chkOvf.SignedInt(ui) + if overflow { + d.d.errorf("simple: overflow converting %v to signed integer", ui) + return + } + if neg { + i = -i + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("binc: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + d.bdRead = false + if vs == bincSpNan { + return math.NaN() + } else if vs == bincSpPosInf { + return math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + return + } else if vs == bincSpNegInf { + return math.Inf(-1) + } else { + d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + return + } + } else if vd == bincVdFloat { + f = d.decFloat() + } else { + f = float64(d.DecodeInt(64)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("binc: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { + // b = false + } else if bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + if d.vd != bincVdMap { + d.d.errorf("Invalid d.vd for map. Expecting 0x%x. 
Got: 0x%x", bincVdMap, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + if d.vd != bincVdArray { + d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen int = -1 + // var ok bool + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(slen) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + if withString { + s = string(bs2) + } + case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. + // + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readx(2))) + } + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) + } + + if vs&0x4 == 0 { + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readx(2))) + case 2: + slen = int(bigen.Uint32(d.r.readx(4))) + case 3: + slen = int(bigen.Uint64(d.r.readx(8))) + } + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + } + default: + d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. 
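+	// (Symbols let a repeated string be written once to the stream and then
+	// referenced by a small id afterwards; the decoder keeps that mapping in d.s.)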
+ // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { + s, _ = d.decStringAndBytes(d.b[:], false, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + var clen int + if d.vd == bincVdString || d.vd == bincVdByteArray { + clen = d.decLen() + } else { + d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + return + } + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, true) + } else { + d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) // int8(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) + default: + d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloat() + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.DecodeString() + case bincVdString: + n.v = valueTypeString + n.s = d.DecodeString() + case bincVdByteArray: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case bincVdTimestamp: + n.v = valueTypeTimestamp + tt, err := decodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
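+//
+//A minimal usage sketch (variables assumed, error handling elided), mirroring
+//the package-level docs:
+//	var bh BincHandle
+//	var out []byte
+//	err := NewEncoderBytes(&out, &bh).Encode(v)
+//	err = NewDecoderBytes(out, &bh).Decode(&v)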
+type BincHandle struct { + BasicHandle + binaryEncodingType + noElemSeparators +} + +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (_ *BincHandle) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) reset() { + e.w = e.e.w + e.s = 0 + e.m = nil +} + +func (d *bincDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 +} + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go new file mode 100644 index 00000000000..3bc328f3062 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -0,0 +1,662 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString = 0x7f + cborBdIndefiniteArray = 0x9f + cborBdIndefiniteMap = 0xbf + cborBdBreak = 0xff +) + +const ( + CborStreamBytes byte = 0x5f + CborStreamString = 0x7f + CborStreamArray = 0x9f + CborStreamMap = 0xbf + CborStreamBreak = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt = 0x20 + cborBaseBytes = 0x40 + cborBaseString = 0x60 + cborBaseArray = 0x80 + cborBaseMap = 0xa0 + cborBaseTag = 0xc0 + cborBaseSimple = 0xe0 +) + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w encWriter + h *CborHandle + x [8]byte +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + 
e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + if false && re.Data != nil { + en.encode(re.Data) + } else if re.Value != nil { + en.encode(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriver) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriver) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriver) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) EncodeSymbol(v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if c == c_RAW { + e.encStringBytesS(cborBaseBytes, stringView(v)) + } else { + e.encStringBytesS(cborBaseString, stringView(v)) + } +} + +func (e *cborEncDriver) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + blen := len(v) / 4 + if blen == 0 { + blen = 64 + } else if blen > 1024 { + blen = 1024 + } + for i := 0; i < len(v); { + var v2 string + i2 := i + blen + if i2 < len(v) { + v2 = v[i:i2] + } else { + v2 = v[i:] + } + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) + } +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r decReader + b [scratchByteArrayLen]byte + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + // decNoSeparator + decDriverNoopContainerReader +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + 
d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("decUint: Invalid descriptor: %v", d.bd) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) + return + } + return +} + +func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + var overflow bool + if neg { + if i, overflow = chkOvf.SignedInt(ui + 1); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui+1) + return + } + i = -i + } else { + if i, overflow = chkOvf.SignedInt(ui); overflow { + d.d.errorf("cbor: overflow converting %v to signed integer", ui) + return + } + } + if chkOvf.Int(i, bitsize) { + d.d.errorf("cbor: overflow integer: %v", i) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("Assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + if chkOvf.Uint(ui, bitsize) { + d.d.errorf("cbor: overflow integer: %v", ui) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt(64)) + } else { + d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) + return + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("cbor: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). 
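+// In CBOR, these are the simple values cborBdFalse (0xf4) and cborBdTrue (0xf5).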
+func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) + d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + d.bdRead = false + if bs == nil { + return d.decAppendIndefiniteBytes(zeroByteSlice) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. 
Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case cborBdIndefiniteBytes: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + // d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... +// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +// +// To encode with indefinite lengths (streaming), users will use +// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants. 
+//
+// For example, to encode "one-byte" as an indefinite length string:
+//	var buf bytes.Buffer
+//	e := NewEncoder(&buf, new(CborHandle))
+//	buf.WriteByte(CborStreamString)
+//	e.MustEncode("one-")
+//	e.MustEncode("byte")
+//	buf.WriteByte(CborStreamBreak)
+//	encodedBytes := buf.Bytes()
+//	var vv interface{}
+//	NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
+//	// Now, vv contains the same string "one-byte"
+//
+type CborHandle struct {
+	binaryEncodingType
+	noElemSeparators
+	BasicHandle
+
+	// IndefiniteLength=true means that we encode using indefinite lengths
+	IndefiniteLength bool
+}
+
+func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+	return h.SetExt(rt, tag, &setExtWrapper{i: ext})
+}
+
+func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
+	return &cborEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
+	return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *cborEncDriver) reset() {
+	e.w = e.e.w
+}
+
+func (d *cborDecDriver) reset() {
+	d.r, d.br = d.d.r, d.d.bytes
+	d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 00000000000..f949f9e7b8d
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2469 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sync"
+	"time"
+)
+
+// Some tagging information for error messages.
+const (
+	msgBadDesc            = "Unrecognized descriptor byte"
+	msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v"
+)
+
+var (
+	onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct")
+	cannotDecodeIntoNilErr               = errors.New("cannot decode into nil")
+
+	decUnreadByteNothingToReadErr   = errors.New("cannot unread - nothing has been read")
+	decUnreadByteLastByteNotReadErr = errors.New("cannot unread - last byte has not been read")
+	decUnreadByteUnknownErr         = errors.New("cannot unread - reason unknown")
+)
+
+// decReader abstracts the reading source, allowing implementations that can
+// read from an io.Reader or directly off a byte slice with zero-copying.
+type decReader interface {
+	unreadn1()
+
+	// readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
+	// just return a view of the []byte being decoded from.
+	// Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control.
+	readx(n int) []byte
+	readb([]byte)
+	readn1() uint8
+	numread() int // number of bytes read
+	track()
+	stopTrack() []byte
+
+	// skip will skip any byte that matches, and return the first non-matching byte
+	skip(accept *bitset256) (token byte)
+	// readTo will read any byte that matches, stopping once no longer matching.
+	readTo(in []byte, accept *bitset256) (out []byte)
+	// readUntil will read, only stopping once it matches the 'stop' byte.
+	readUntil(in []byte, stop byte) (out []byte)
+}
+
+type decDriver interface {
+	// this will check if the next token is a break.
+	CheckBreak() bool
+	// Note: TryDecodeAsNil should be careful not to share any temporary []byte with
+	// the rest of the decDriver. This is because sometimes, we optimize by holding onto
+	// a transient []byte, and ensuring the only other call we make to the decDriver
+	// during that time is maybe a TryDecodeAsNil() call.
+	TryDecodeAsNil() bool
+	// vt is one of: Bytes, String, Nil, Slice or Map. Return valueTypeUnset if not known.
+	ContainerType() (vt valueType)
+	// IsBuiltinType(rt uintptr) bool
+	DecodeBuiltin(rt uintptr, v interface{})
+
+	// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+	// For maps and arrays, it will not do the decoding in-band, but will signal
+	// the decoder, so that is done later, by setting the decNaked.valueType field.
+	//
+	// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+	// for extensions, DecodeNaked must read the tag and the []byte if it exists.
+	// if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+	// that stores the subsequent value in-band, and complete reading the RawExt.
+	//
+	// extensions should also use readx to decode them, for efficiency.
+	// kInterface will extract the detached byte slice if it has to pass it outside its realm.
+	DecodeNaked()
+	DecodeInt(bitsize uint8) (i int64)
+	DecodeUint(bitsize uint8) (ui uint64)
+	DecodeFloat(chkOverflow32 bool) (f float64)
+	DecodeBool() (b bool)
+	// DecodeString can also decode symbols.
+	// It looks redundant as DecodeBytes is available.
+	// However, some codecs (e.g. binc) support symbols and can
+	// return a pre-stored string value, meaning that it can bypass
+	// the cost of []byte->string conversion.
+	DecodeString() (s string)
+	DecodeStringAsBytes() (v []byte)
+
+	// DecodeBytes may be called directly, without going through reflection.
+	// Consequently, it must be designed to handle possible nil.
+	DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
+	// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+	// decodeExt will decode into a *RawExt or into an extension.
+	DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64)
+	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+	ReadArrayStart() int
+	ReadArrayElem()
+	ReadArrayEnd()
+	ReadMapStart() int
+	ReadMapElemKey()
+	ReadMapElemValue()
+	ReadMapEnd()
+
+	reset()
+	uncacheRead()
+}
+
+// type decNoSeparator struct {}
+// func (_ decNoSeparator) ReadEnd() {}
+
+type decDriverNoopContainerReader struct{}
+
+func (_ decDriverNoopContainerReader) ReadArrayStart() (v int) { return }
+func (_ decDriverNoopContainerReader) ReadArrayElem()          {}
+func (_ decDriverNoopContainerReader) ReadArrayEnd()           {}
+func (_ decDriverNoopContainerReader) ReadMapStart() (v int)   { return }
+func (_ decDriverNoopContainerReader) ReadMapElemKey()         {}
+func (_ decDriverNoopContainerReader) ReadMapElemValue()       {}
+func (_ decDriverNoopContainerReader) ReadMapEnd()             {}
+func (_ decDriverNoopContainerReader) CheckBreak() (v bool)    { return }
+
+// func (_ decNoSeparator) uncacheRead() {}
+
+type DecodeOptions struct {
+	// MapType specifies type to use during schema-less decoding of a map in the stream.
+	// If nil, we use map[interface{}]interface{}
+	MapType reflect.Type
+
+	// SliceType specifies type to use during schema-less decoding of an array in the stream.
+	// If nil, we use []interface{}
+	SliceType reflect.Type
+
+	// MaxInitLen defines the maximum initial length that we "make" a collection (string, slice, map, chan).
+	// If 0 or negative, we default to a sensible value based on the size of an element in the collection.
+	//
+	// For example, when decoding, a stream may say that it has 2^64 elements.
+	// We should not automatically provision a slice of that length, to prevent an Out-Of-Memory crash.
+	// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+	MaxInitLen int
+
+	// If ErrorIfNoField, return an error when decoding a map
+	// from a codec stream into a struct, and no matching struct field is found.
+	ErrorIfNoField bool
+
+	// If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+	// For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+	// or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+	ErrorIfNoArrayExpand bool
+
+	// If SignedInteger, use int64 during schema-less decoding of unsigned values (not uint64).
+	SignedInteger bool
+
+	// MapValueReset controls how we decode into a map value.
+	//
+	// By default, we MAY retrieve the mapping for a key, and then decode into that.
+	// However, especially with big maps, that retrieval may be expensive and unnecessary
+	// if the stream already contains all that is necessary to recreate the value.
+	//
+	// If true, we will never retrieve the previous mapping,
+	// but rather decode into a new value and set that in the map.
+	//
+	// If false, we will retrieve the previous mapping if necessary e.g.
+	// the previous mapping is a pointer, or is a struct or array with pre-set state,
+	// or is an interface.
+	MapValueReset bool
+
+	// SliceElementReset: on decoding a slice, reset the element to a zero value first.
+	//
+	// concern: if the slice already contained some garbage, we will decode into that garbage.
+	SliceElementReset bool
+
+	// InterfaceReset controls how we decode into an interface.
+	//
+	// By default, when we see a field that is an interface{...},
+	// or a map with interface{...} value, we will attempt decoding into the
+	// "contained" value.
+	//
+	// However, this prevents us from reading a string into an interface{}
+	// that formerly contained a number.
+	//
+	// If true, we will decode into a new "blank" value, and set that in the interface.
+	// If false, we will decode into whatever is contained in the interface.
+	InterfaceReset bool
+
+	// InternString controls interning of strings during decoding.
+	//
+	// Some handles, e.g. json, typically will read map keys as strings.
+	// If the set of keys is finite, it may help reduce allocation to
+	// look them up from a map (rather than allocating them afresh).
+	//
+	// Note: Handles will be smart when using the intern functionality.
+	// Not every string should be interned.
+	// An excellent use-case for interning is struct field names,
+	// or map keys where key type is string.
+	InternString bool
+
+	// PreferArrayOverSlice controls whether to decode to an array or a slice.
+	//
+	// This only impacts decoding into a nil interface{}.
+	// Consequently, it has no effect on codecgen.
+	//
+	// *Note*: This only applies if using go1.5 and above,
+	// as it requires reflect.ArrayOf support which was absent before go1.5.
+	PreferArrayOverSlice bool
+
+	// DeleteOnNilMapValue controls how to decode a nil value in the stream.
+	//
+	// If true, we will delete the mapping of the key.
+	// Else, just set the mapping to the zero value of the type.
+	DeleteOnNilMapValue bool
+
+	// ReaderBufferSize is the size of the buffer used when reading.
+	//
+	// if > 0, we use a smart buffer internally for performance purposes.
+ ReaderBufferSize int +} + +// ------------------------------------ + +type bufioDecReader struct { + buf []byte + r io.Reader + + c int // cursor + n int // num read + err error + + trb bool + tr []byte + + b [8]byte +} + +func (z *bufioDecReader) reset(r io.Reader) { + z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *bufioDecReader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + p0 := p + n = copy(p, z.buf[z.c:]) + z.c += n + if z.c == len(z.buf) { + z.c = 0 + } + z.n += n + if len(p) == n { + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + p = p[n:] + var n2 int + // if we are here, then z.buf is all read + if len(p) > len(z.buf) { + n2, err = decReadFull(z.r, p) + n += n2 + z.n += n2 + z.err = err + // don't return EOF if some bytes were read. keep for next time. + if n > 0 && err == io.EOF { + err = nil + } + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + // z.c is now 0, and len(p) <= len(z.buf) + for len(p) > 0 && z.err == nil { + // println("len(p) loop starting ... ") + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 > 0 { + if err == io.EOF { + err = nil + } + z.buf = z.buf[:n2] + n2 = copy(p, z.buf) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + } + z.err = err + // println("... len(p) loop done") + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return +} + +func (z *bufioDecReader) ReadByte() (b byte, err error) { + z.b[0] = 0 + _, err = z.Read(z.b[:1]) + b = z.b[0] + return +} + +func (z *bufioDecReader) UnreadByte() (err error) { + if z.err != nil { + return z.err + } + if z.c > 0 { + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } + return + } + return decUnreadByteNothingToReadErr +} + +func (z *bufioDecReader) numread() int { + return z.n +} + +func (z *bufioDecReader) readx(n int) (bs []byte) { + if n <= 0 || z.err != nil { + return + } + if z.c+n <= len(z.buf) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + return + } + bs = make([]byte, n) + _, err := z.Read(bs) + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) readb(bs []byte) { + _, err := z.Read(bs) + if err != nil { + panic(err) + } +} + +// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) { +// b, err := z.ReadByte() +// if err != nil { +// if err == io.EOF { +// eof = true +// } else { +// panic(err) +// } +// } +// return +// } + +func (z *bufioDecReader) readn1() (b uint8) { + b, err := z.ReadByte() + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) { + // flag: 1 (skip), 2 (readTo), 4 (readUntil) + if flag == 4 { + for i := z.c; i < len(z.buf); i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := z.c; i < len(z.buf); i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + if flag == 1 { + i++ + } else { + out = z.buf[z.c:i] + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) 
+ } + z.c = i + return + } + } + } + z.n += len(z.buf) - z.c + if flag != 1 { + out = append(in, z.buf[z.c:]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + if z.err != nil { + return + } + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, z.err = z.r.Read(z.buf) + if n2 > 0 && z.err != nil { + z.err = nil + } + z.buf = z.buf[:n2] + if flag == 4 { + for i := 0; i < n2; i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n += i - 1 + i++ + out = append(out, z.buf[z.c:i]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := 0; i < n2; i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n += i - 1 + if flag == 1 { + i++ + } + if flag != 1 { + out = append(out, z.buf[z.c:i]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + if flag != 1 { + out = append(out, z.buf[:n2]...) + } + z.n += n2 + if z.err != nil { + return + } + if z.trb { + z.tr = append(z.tr, z.buf[:n2]...) + } + } +} + +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + token, _ = z.search(nil, accept, 0, 1) + return +} + +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + _, out = z.search(in, accept, 0, 2) + return +} + +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + _, out = z.search(in, nil, stop, 4) + return +} + +func (z *bufioDecReader) unreadn1() { + err := z.UnreadByte() + if err != nil { + panic(err) + } +} + +func (z *bufioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *bufioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. +type ioDecReader struct { + r io.Reader // the reader passed in + + rr io.Reader + br io.ByteScanner + + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + b [4]byte // tiny buffer for reading single bytes + trb bool // tracking bytes turned on + + // temp byte array re-used internally for efficiency during read. + // shares buffer with Decoder, so we keep size of struct within 8 words. 
+ x *[scratchByteArrayLen]byte + n int // num read + tr []byte // tracking bytes read +} + +func (z *ioDecReader) reset(r io.Reader) { + z.r = r + z.rr = r + z.l, z.ls, z.n, z.trb = 0, 0, 0, false + if z.tr != nil { + z.tr = z.tr[:0] + } + var ok bool + if z.br, ok = r.(io.ByteScanner); !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + var firstByte bool + if z.ls == 1 { + z.ls = 2 + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = 2 + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case 2: + z.ls = 1 + case 0: + err = decUnreadByteNothingToReadErr + case 1: + err = decUnreadByteLastByteNotReadErr + default: + err = decUnreadByteUnknownErr + } + return +} + +func (z *ioDecReader) numread() int { + return z.n +} + +func (z *ioDecReader) readx(n int) (bs []byte) { + if n <= 0 { + return + } + if n < len(z.x) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + // if len(bs) == 0 { + // return + // } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) readn1() (b uint8) { + var err error + if b, err = z.br.ReadByte(); err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + for { + var eof bool + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + continue + } + return + } +} + +func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + return + } + if accept.isset(token) { + out = append(out, token) + } else { + z.unreadn1() + return + } + } +} + +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + } +} + +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +func (z *ioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------ + +var bytesDecReaderCannotUnreadErr = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available + t int // track start +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() int { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(bytesDecReaderCannotUnreadErr) + } + z.c-- + z.a++ + return +} + +func (z *bytesDecReader) readx(n int) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. 
+ + if n <= 0 { + } else if z.a == 0 { + panic(io.EOF) + } else if n > z.a { + panic(io.ErrUnexpectedEOF) + } else { + c0 := z.c + z.c = c0 + n + z.a = z.a - n + bs = z.b[c0:z.c] + } + return +} + +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(len(bs))) +} + +func (z *bytesDecReader) readn1() (v uint8) { + if z.a == 0 { + panic(io.EOF) + } + v = z.b[z.c] + z.c++ + z.a-- + return +} + +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { + if z.a == 0 { + return + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if !accept.isset(z.b[i]) { + token = z.b[i] + i++ + z.a -= (i - z.c) + z.c = i + return + } + } + z.a, z.c = 0, blen + return +} + +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + if z.a == 0 { + return + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if !accept.isset(z.b[i]) { + out = z.b[z.c:i] + z.a -= (i - z.c) + z.c = i + return + } + } + out = z.b[z.c:] + z.a, z.c = 0, blen + return +} + +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + if z.a == 0 { + panic(io.EOF) + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if z.b[i] == stop { + i++ + out = z.b[z.c:i] + z.a -= (i - z.c) + z.c = i + return + } + } + z.a, z.c = 0, blen + panic(io.EOF) +} + +func (z *bytesDecReader) track() { + z.t = z.c +} + +func (z *bytesDecReader) stopTrack() (bs []byte) { + return z.b[z.t:z.c] +} + +// ---------------------------------------- + +func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) +} + +func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), 0, nil) +} + +func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) +} + +func (d *Decoder) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { + if indir == -1 { + v = rv2i(rv.Addr()) + } else if indir == 0 { + v = rv2i(rv) + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + v = rv2i(rv) + } + return +} + +func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { + d.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(d) +} + +func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { + bm := d.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) + xbs := d.d.DecodeBytes(nil, true) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := d.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := d.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) + // bs := d.d.DecodeBytes(d.b[:], true, true) + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
+ fnerr := tm.UnmarshalJSON(d.nextValueBytes())
+ if fnerr != nil {
+ panic(fnerr)
+ }
+}
+
+func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
+ d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+// var kIntfCtr uint64
+
+func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
+ // nil interface:
+ // use some heuristics to decode it appropriately
+ // based on the detected next value in the stream.
+ n := d.naked()
+ d.d.DecodeNaked()
+ if n.v == valueTypeNil {
+ return
+ }
+ // We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+ // if num := f.ti.rt.NumMethod(); num > 0 {
+ if f.ti.numMeth > 0 {
+ d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+ return
+ }
+ // var useRvn bool
+ switch n.v {
+ case valueTypeMap:
+ if d.mtid == 0 || d.mtid == mapIntfIntfTypId {
+ if n.lm < arrayCacheLen {
+ n.ma[n.lm] = nil
+ rvn = n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+n.lm]
+ n.lm++
+ d.decode(&n.ma[n.lm-1])
+ n.lm--
+ } else {
+ var v2 map[interface{}]interface{}
+ d.decode(&v2)
+ rvn = reflect.ValueOf(&v2).Elem()
+ }
+ } else if d.mtid == mapStrIntfTypId { // for json performance
+ if n.ln < arrayCacheLen {
+ n.na[n.ln] = nil
+ rvn = n.rr[decNakedMapStrIntfIdx*arrayCacheLen+n.ln]
+ n.ln++
+ d.decode(&n.na[n.ln-1])
+ n.ln--
+ } else {
+ var v2 map[string]interface{}
+ d.decode(&v2)
+ rvn = reflect.ValueOf(&v2).Elem()
+ }
+ } else {
+ rvn = reflect.New(d.h.MapType)
+ if useLookupRecognizedTypes && d.mtr { // isRecognizedRtid(d.mtid) {
+ d.decode(rv2i(rvn))
+ rvn = rvn.Elem()
+ } else {
+ rvn = rvn.Elem()
+ d.decodeValue(rvn, nil, false, true)
+ }
+ }
+ case valueTypeArray:
+ if d.stid == 0 || d.stid == intfSliceTypId {
+ if n.ls < arrayCacheLen {
+ n.sa[n.ls] = nil
+ rvn = n.rr[decNakedSliceIntfIdx*arrayCacheLen+n.ls]
+ n.ls++
+ d.decode(&n.sa[n.ls-1])
+ n.ls--
+ } else {
+ var v2 []interface{}
+ d.decode(&v2)
+ rvn = reflect.ValueOf(&v2).Elem()
+ }
+ if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
+ rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem()
+ reflect.Copy(rvn2, rvn)
+ rvn = rvn2
+ }
+ } else {
+ rvn = reflect.New(d.h.SliceType)
+ if useLookupRecognizedTypes && d.str { // isRecognizedRtid(d.stid) {
+ d.decode(rv2i(rvn))
+ rvn = rvn.Elem()
+ } else {
+ rvn = rvn.Elem()
+ d.decodeValue(rvn, nil, false, true)
+ }
+ }
+ case valueTypeExt:
+ var v interface{}
+ tag, bytes := n.u, n.l // calling decode below might taint the values
+ if bytes == nil {
+ if n.li < arrayCacheLen {
+ n.ia[n.li] = nil
+ n.li++
+ d.decode(&n.ia[n.li-1])
+ // v = *(&n.ia[l])
+ n.li--
+ v = n.ia[n.li]
+ n.ia[n.li] = nil
+ } else {
+ d.decode(&v)
+ }
+ }
+ bfn := d.h.getExtForTag(tag)
+ if bfn == nil {
+ var re RawExt
+ re.Tag = tag
+ re.Data = detachZeroCopyBytes(d.bytes, nil, bytes)
+ re.Value = v
+ rvn = reflect.ValueOf(&re).Elem()
+ } else {
+ rvnA := reflect.New(bfn.rt)
+ if bytes != nil {
+ bfn.ext.ReadExt(rv2i(rvnA), bytes)
+ } else {
+ bfn.ext.UpdateExt(rv2i(rvnA), v)
+ }
+ rvn = rvnA.Elem()
+ }
+ case valueTypeNil:
+ // no-op
+ case valueTypeInt:
+ rvn = n.rr[decNakedIntIdx] // d.np.get(&n.i)
+ case valueTypeUint:
+ rvn = n.rr[decNakedUintIdx] // d.np.get(&n.u)
+ case valueTypeFloat:
+ rvn = n.rr[decNakedFloatIdx] // d.np.get(&n.f)
+ case valueTypeBool:
+ rvn = n.rr[decNakedBoolIdx] // d.np.get(&n.b)
+ case valueTypeString, valueTypeSymbol:
+ rvn = n.rr[decNakedStringIdx] // d.np.get(&n.s)
+ case valueTypeBytes:
+ rvn = n.rr[decNakedBytesIdx] // 
d.np.get(&n.l) + case valueTypeTimestamp: + rvn = n.rr[decNakedTimeIdx] // d.np.get(&n.t) + default: + panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) + } + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. + var rvn reflect.Value + if rv.IsNil() { + if rvn = d.kInterfaceNaked(f); rvn.IsValid() { + rv.Set(rvn) + } + return + } + if d.h.InterfaceReset { + if rvn = d.kInterfaceNaked(f); rvn.IsValid() { + rv.Set(rvn) + } else { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return + } + + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. + // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true, true) + rv.Set(rvn2) +} + +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { + // checking if recognized within kstruct is too expensive. + // only check where you can determine if valid outside the loop + // ie on homogenous collections: slices, arrays and maps. + // + // if true, we don't create too many decFn's. + // It's a delicate balance. + const checkRecognized bool = false // false: TODO + + fti := f.ti + dd := d.d + elemsep := d.hh.hasElemSeparators() + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + dd.ReadMapEnd() + return + } + tisfi := fti.sfi + hasLen := containerLen >= 0 + + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // rvkencname := dd.DecodeString() + if elemsep { + dd.ReadMapElemKey() + } + rvkencnameB := dd.DecodeStringAsBytes() + rvkencname := stringView(rvkencnameB) + // rvksi := ti.getForEncName(rvkencname) + if elemsep { + dd.ReadMapElemValue() + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, checkRecognized, true) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + // keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView // not needed, as reference is outside loop + } + dd.ReadMapEnd() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + dd.ReadArrayEnd() + return + } + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. 
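+ // Hedged illustration (type is hypothetical): a struct using this package's
+ // toarray option decodes through this branch, filling fields by position:
+ //   type Point struct {
+ //   _struct bool `codec:",toarray"`
+ //   X, Y int
+ //   }
+ // so a stream array [3, 4] sets X=3 then Y=4 via the fti.sfip order below.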
+ hasLen := containerLen >= 0 + for j, si := range fti.sfip { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, checkRecognized, true) + } + } + if containerLen > len(fti.sfip) { + // read remaining values and throw away + for j := len(fti.sfip); j < containerLen; j++ { + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") + } + } + dd.ReadArrayEnd() + } else { + d.error(onlyMapOrArrayCanDecodeIntoStructErr) + return + } +} + +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). + ti := f.ti + dd := d.d + rtelem0 := ti.rt.Elem() + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + ch := rv2i(rv).(chan<- byte) + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + } + slh.End() + return + } + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtElem0Id := rt2id(rtelem0) + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rv0, rv9 reflect.Value + rv0 = rv + rvChanged := false + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rv.CanSet() { + rv.SetLen(rvlen) + } else { + rv = rv.Slice(0, rvlen) + rvChanged = true + } + } else { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rv.CanSet() { + rv.SetLen(rvlen) + } else { + rv = rv.Slice(0, rvlen) + rvChanged = true + } + } + } + + var recognizedRtid, recognizedRtidPtr bool + if useLookupRecognizedTypes { + recognizedRtid = isRecognizedRtid(rtElem0Id) + recognizedRtidPtr = isRecognizedRtidPtr(rtElem0Id) + } + + // consider creating new element once, and just decoding into it. 
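+ // Worked illustration (numbers assumed): decoding a 1000-element stream
+ // array into a nil []int64 first sizes via decInferLen(1000, d.h.MaxInitLen, 8)
+ // and then grows with decExpandSliceRV, rather than trusting the stream's
+ // declared length for one unbounded up-front allocation.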
+ var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else { + rvlen = 8 + } + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else if f.seq == seqTypeChan { + rv.Set(reflect.MakeChan(ti.rt, rvlen)) + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if useLookupRecognizedTypes && (recognizedRtid || recognizedRtidPtr) { + d.decode(rv2i(rv9.Addr())) + } else { + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, false, true) + } + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs + var rvcap2 int + rv9, rvcap2, rvChanged = decExpandSliceRV(rv, ti.rt, rtelem0Size, 1, rvlen, rvcap) + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + } + if decodeAsNil { + continue + } + + if useLookupRecognizedTypes && recognizedRtid { + d.decode(rv2i(rv9.Addr())) + } else if useLookupRecognizedTypes && recognizedRtidPtr { // && !rv9.IsNil() { + if rv9.IsNil() { + rv9.Set(reflect.New(rtelem)) + } + d.decode(rv2i(rv9)) + } else { + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, false, true) + } + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else { + rv = rv.Slice(0, j) + rvChanged = true + } + rvlen = j + } else if j == 0 && rv.IsNil() { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } + } + slh.End() + + if rvChanged { + rv0.Set(rv) + } +} + +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.hh.hasElemSeparators() + ti := f.ti + if rv.IsNil() { + rv.Set(makeMapReflect(ti.rt, containerLen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + ktype, vtype := ti.rt.Key(), ti.rt.Elem() + ktypeId := rt2id(ktype) + vtypeId := rt2id(vtype) + vtypeKind := vtype.Kind() + var recognizedKtyp, recognizedVtyp, recognizedPtrKtyp, recognizedPtrVtyp bool + if useLookupRecognizedTypes { + recognizedKtyp = isRecognizedRtid(ktypeId) + recognizedVtyp = isRecognizedRtid(vtypeId) + recognizedPtrKtyp = isRecognizedRtidPtr(ktypeId) + recognizedPtrVtyp = isRecognizedRtidPtr(vtypeId) + } + + var keyFn, valFn *codecFn + var ktypeLo, vtypeLo reflect.Type + for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { + } + + for vtypeLo = vtype; 
vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
+ }
+
+ var mapGet, mapSet bool
+ rvvImmut := isImmutableKind(vtypeKind)
+ if !d.h.MapValueReset {
+ // if pointer, mapGet = true
+ // if interface, mapGet = true if !DecodeNakedAlways (else false)
+ // if builtin, mapGet = false
+ // else mapGet = true
+ if vtypeKind == reflect.Ptr {
+ mapGet = true
+ } else if vtypeKind == reflect.Interface {
+ if !d.h.InterfaceReset {
+ mapGet = true
+ }
+ } else if !rvvImmut {
+ mapGet = true
+ }
+ }
+
+ var rvk, rvkp, rvv, rvz reflect.Value
+ rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk.
+ ktypeIsString := ktypeId == stringTypId
+ ktypeIsIntf := ktypeId == intfTypId
+ hasLen := containerLen > 0
+ var kstrbs []byte
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+ if rvkMut || !rvkp.IsValid() {
+ rvkp = reflect.New(ktype)
+ rvk = rvkp.Elem()
+ }
+ if elemsep {
+ dd.ReadMapElemKey()
+ }
+ if dd.TryDecodeAsNil() {
+ // Previously, if a nil key, we just ignored the mapped value and continued.
+ // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil}
+ // an empty map.
+ // Instead, we treat a nil key as the zero value of the type.
+ rvk.Set(reflect.Zero(ktype))
+ } else if ktypeIsString {
+ kstrbs = dd.DecodeStringAsBytes()
+ rvk.SetString(stringView(kstrbs))
+ // NOTE: if doing an insert, you MUST use a real string (not stringview)
+ } else if useLookupRecognizedTypes && recognizedKtyp {
+ d.decode(rv2i(rvkp))
+ // rvk = rvkp.Elem() //TODO: remove, unnecessary
+ } else if useLookupRecognizedTypes && recognizedPtrKtyp {
+ if rvk.IsNil() {
+ rvk = reflect.New(ktypeLo)
+ }
+ d.decode(rv2i(rvk))
+ } else {
+ if keyFn == nil {
+ keyFn = d.cf.get(ktypeLo, true, true)
+ }
+ d.decodeValue(rvk, keyFn, false, true)
+ }
+ // special case if a byte array.
+ if ktypeIsIntf {
+ if rvk2 := rvk.Elem(); rvk2.IsValid() {
+ rvk = rvk2
+ if rvk.Type() == uint8SliceTyp {
+ rvk = reflect.ValueOf(d.string(rvk.Bytes()))
+ }
+ }
+ }
+
+ if elemsep {
+ dd.ReadMapElemValue()
+ }
+
+ // Brittle, but OK per TryDecodeAsNil() contract.
+ // i.e. TryDecodeAsNil never shares slices with other decDriver procedures
+ if dd.TryDecodeAsNil() {
+ if ktypeIsString {
+ rvk.SetString(d.string(kstrbs))
+ }
+ if d.h.DeleteOnNilMapValue {
+ rv.SetMapIndex(rvk, reflect.Value{})
+ } else {
+ rv.SetMapIndex(rvk, reflect.Zero(vtype))
+ }
+ continue
+ }
+
+ mapSet = true // set to false if you do a get, and it's a non-nil pointer
+ if mapGet {
+ // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
+ rvv = rv.MapIndex(rvk)
+ if !rvv.IsValid() {
+ rvv = reflect.New(vtype).Elem()
+ } else if vtypeKind == reflect.Ptr {
+ if rvv.IsNil() {
+ rvv = reflect.New(vtype).Elem()
+ } else {
+ mapSet = false
+ }
+ } else if vtypeKind == reflect.Interface {
+ // not addressable, and thus not settable.
+ // we MUST create a settable/addressable variant
+ rvv2 := reflect.New(rvv.Type()).Elem()
+ if !rvv.IsNil() {
+ rvv2.Set(rvv)
+ }
+ rvv = rvv2
+ }
+ // else it is ~mutable, and we can just decode into it directly
+ } else if rvvImmut {
+ if !rvz.IsValid() {
+ rvz = reflect.New(vtype).Elem()
+ }
+ rvv = rvz
+ } else {
+ rvv = reflect.New(vtype).Elem()
+ }
+
+ // We MUST be done with the stringview of the key, before decoding the value
+ // so that we don't bastardize the reused byte array.
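+ // Hedged illustration (stream assumed): for {"a": "b"}, the key view over
+ // "a" must be swapped for the interned string from d.string(kstrbs) before
+ // decoding "b", which may overwrite the shared scratch bytes behind the view.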
+ if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if useLookupRecognizedTypes && recognizedVtyp && rvv.CanAddr() { + d.decode(rv2i(rvv.Addr())) + } else if useLookupRecognizedTypes && recognizedPtrVtyp { + if rvv.IsNil() { + rvv = reflect.New(vtypeLo) + mapSet = true + } + d.decode(rv2i(rvv)) + } else { + if valFn == nil { + valFn = d.cf.get(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, false, true) + // d.decodeValueFn(rvv, valFn) + } + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + u uint64 + i int64 + f float64 + l []byte + s string + t time.Time + + b bool + + inited bool + + v valueType + + li, lm, ln, ls int8 + + // array/stacks for reducing allocation + // keep arrays at the bottom? Chance is that they are not used much. + ia [arrayCacheLen]interface{} + ma [arrayCacheLen]map[interface{}]interface{} + na [arrayCacheLen]map[string]interface{} + sa [arrayCacheLen][]interface{} + // ra [2]RawExt + + rr [5 * arrayCacheLen]reflect.Value +} + +const ( + decNakedUintIdx = iota + decNakedIntIdx + decNakedFloatIdx + decNakedBytesIdx + decNakedStringIdx + decNakedTimeIdx + decNakedBoolIdx +) +const ( + _ = iota // maps to the scalars above + decNakedIntfIdx + decNakedMapIntfIntfIdx + decNakedMapStrIntfIdx + decNakedSliceIntfIdx +) + +func (n *decNaked) init() { + if n.inited { + return + } + // n.ms = n.ma[:0] + // n.is = n.ia[:0] + // n.ns = n.na[:0] + // n.ss = n.sa[:0] + + n.rr[decNakedUintIdx] = reflect.ValueOf(&n.u).Elem() + n.rr[decNakedIntIdx] = reflect.ValueOf(&n.i).Elem() + n.rr[decNakedFloatIdx] = reflect.ValueOf(&n.f).Elem() + n.rr[decNakedBytesIdx] = reflect.ValueOf(&n.l).Elem() + n.rr[decNakedStringIdx] = reflect.ValueOf(&n.s).Elem() + n.rr[decNakedTimeIdx] = reflect.ValueOf(&n.t).Elem() + n.rr[decNakedBoolIdx] = reflect.ValueOf(&n.b).Elem() + + for i := range [arrayCacheLen]struct{}{} { + n.rr[decNakedIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ia[i])).Elem() + n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ma[i])).Elem() + n.rr[decNakedMapStrIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.na[i])).Elem() + n.rr[decNakedSliceIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.sa[i])).Elem() + } + n.inited = true + // n.rr[] = reflect.ValueOf(&n.) 
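+ // Hedged illustration: after init, each scalar slot is an addressable
+ // reflect.Value over its backing field, e.g.
+ //   n.rr[decNakedUintIdx].SetUint(7) // writes through to n.u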
+}
+
+func (n *decNaked) reset() {
+ if n == nil {
+ return
+ }
+ n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0
+}
+
+type rtid2rv struct {
+ rtid uintptr
+ rv reflect.Value
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+ // hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+ // Try to put things that go together to fit within a cache line (8 words).
+
+ d decDriver
+ // NOTE: Decoder shouldn't call its read methods,
+ // as the handler MAY need to do some coordination.
+ r decReader
+ hh Handle
+ h *BasicHandle
+
+ mtr, mtrp, str, strp bool //
+
+ be bool // is binary encoding
+ bytes bool // is bytes reader
+ js bool // is json handle
+
+ // ---- cpu cache line boundary?
+
+ rb bytesDecReader
+ ri ioDecReader
+ bi bufioDecReader
+
+ // cr containerStateRecv
+
+ n *decNaked
+ nsp *sync.Pool
+
+ // ---- cpu cache line boundary?
+
+ is map[string]string // used for interning strings
+
+ // cache the mapTypeId and sliceTypeId for faster comparisons
+ mtid uintptr
+ stid uintptr
+
+ b [scratchByteArrayLen]byte
+ // _ uintptr // for alignment purposes, so next one starts from a cache line
+
+ err error
+ // ---- cpu cache line boundary?
+
+ cf codecFner
+ // _ [64]byte // force alignment???
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.Reset(r)
+ return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+ d := newDecoder(h)
+ d.ResetBytes(in)
+ return d
+}
+
+var defaultDecNaked decNaked
+
+func newDecoder(h Handle) *Decoder {
+ d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()}
+
+ // NOTE: do not initialize d.n here. It is lazily initialized in d.naked()
+
+ _, d.js = h.(*JsonHandle)
+ if d.h.InternString {
+ d.is = make(map[string]string, 32)
+ }
+ d.d = h.newDecDriver(d)
+ // d.cr, _ = d.d.(containerStateRecv)
+ return d
+}
+
+// naked must be called before each call to .DecodeNaked,
+// as they will use it.
+func (d *Decoder) naked() *decNaked {
+ if d.n == nil {
+ // consider one of:
+ // - get from sync.Pool (if GC is frequent, there's no value here)
+ // - new alloc (safest. only init'ed if a naked decode will be done)
+ // - field in Decoder (makes the Decoder struct very big)
+ // To support using a decoder where a DecodeNaked is not needed,
+ // we prefer #1 or #2.
+ // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool
+ // d.n.init()
+ var v interface{}
+ d.nsp, v = pool.decNaked()
+ d.n = v.(*decNaked)
+ }
+ return d.n
+}
+
+func (d *Decoder) resetCommon() {
+ d.n.reset()
+ d.d.reset()
+ d.cf.reset(d.hh)
+ d.err = nil
+ // reset all things which were cached from the Handle,
+ // but could be changed.
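+ // (Hedged example: a caller may change h.MapType between resets, e.g.
+ // h.MapType = reflect.TypeOf(map[string]interface{}(nil)), so the cached
+ // ids and recognized-type flags are recomputed below.)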
+ d.mtid, d.stid = 0, 0 + d.mtr, d.mtrp, d.str, d.strp = false, false, false, false + if d.h.MapType != nil { + d.mtid = rt2id(d.h.MapType) + if useLookupRecognizedTypes { + d.mtr = isRecognizedRtid(d.mtid) + d.mtrp = isRecognizedRtidPtr(d.mtid) + } + } + if d.h.SliceType != nil { + d.stid = rt2id(d.h.SliceType) + if useLookupRecognizedTypes { + d.str = isRecognizedRtid(d.stid) + d.strp = isRecognizedRtidPtr(d.stid) + } + } +} + +func (d *Decoder) Reset(r io.Reader) { + if d.h.ReaderBufferSize > 0 { + d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize) + d.bi.reset(r) + d.r = &d.bi + } else { + d.ri.x = &d.b + // d.s = d.sa[:0] + d.ri.reset(r) + d.r = &d.ri + } + d.resetCommon() +} + +func (d *Decoder) ResetBytes(in []byte) { + d.bytes = true + d.rb.reset(in) + d.r = &d.rb + d.resetCommon() +} + +// Decode decodes the stream from reader and stores the result in the +// value pointed to by v. v cannot be a nil pointer. v can also be +// a reflect.Value of a pointer. +// +// Note that a pointer to a nil interface is not a nil pointer. +// If you do not know what type of stream it is, pass in a pointer to a nil interface. +// We will decode and store a value in that nil interface. +// +// Sample usages: +// // Decoding into a non-nil typed value +// var f float32 +// err = codec.NewDecoder(r, handle).Decode(&f) +// +// // Decoding into nil interface +// var v interface{} +// dec := codec.NewDecoder(r, handle) +// err = dec.Decode(&v) +// +// When decoding into a nil interface{}, we will decode into an appropriate value based +// on the contents of the stream: +// - Numbers are decoded as float64, int64 or uint64. +// - Other values are decoded appropriately depending on the type: +// bool, string, []byte, time.Time, etc +// - Extensions are decoded as RawExt (if no ext function registered for the tag) +// Configurations exist on the Handle to override defaults +// (e.g. for MapType, SliceType and how to decode raw bytes). +// +// When decoding into a non-nil interface{} value, the mode of encoding is based on the +// type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error +// - Else decode it based on its reflect.Kind +// +// There are some special rules when decoding into containers (slice/array/map/struct). +// Decode will typically use the stream contents to UPDATE the container. +// - A map can be decoded from a stream map, by updating matching keys. +// - A slice can be decoded from a stream array, +// by updating the first n elements, where n is length of the stream. +// - A slice can be decoded from a stream map, by decoding as if +// it contains a sequence of key-value pairs. +// - A struct can be decoded from a stream map, by updating matching fields. +// - A struct can be decoded from a stream array, +// by updating fields as they occur in the struct (by index). +// +// When decoding a stream map or array with length of 0 into a nil map or slice, +// we reset the destination map or slice to a zero-length value. +// +// However, when decoding a stream nil, we reset the destination container +// to its "zero" value (e.g. nil for slice/map, etc). +// +func (d *Decoder) Decode(v interface{}) (err error) { + defer panicToErrs2(&d.err, &err) + d.MustDecode(v) + return +} + +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. 
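+//
+// Hedged usage sketch (a configured Handle h and input data are assumed):
+//   var v map[string]interface{}
+//   codec.NewDecoderBytes(data, h).MustDecode(&v) // panics instead of returning an error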
+func (d *Decoder) MustDecode(v interface{}) { + // TODO: Top-level: ensure that v is a pointer and not nil. + if d.err != nil { + panic(d.err) + } + if d.d.TryDecodeAsNil() { + d.setZero(v) + } else { + d.decode(v) + } + if d.nsp != nil { + if d.n != nil { + d.nsp.Put(d.n) + d.n = nil + } + d.nsp = nil + } + d.n = nil + // xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) +} + +// // this is not a smart swallow, as it allocates objects and does unnecessary work. +// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + elemsep := d.hh.hasElemSeparators() + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() + } + d.swallow() + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + } + dd.ReadMapEnd() + case valueTypeArray: + containerLen := dd.ReadArrayStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() + } + d.swallow() + } + dd.ReadArrayEnd() + case valueTypeBytes: + dd.DecodeBytes(d.b[:], true) + case valueTypeString: + dd.DecodeStringAsBytes() + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + n.ia[n.li-1] = nil + n.li-- + } else { + var v2 interface{} + d.decode(&v2) + } + } + } +} + +func (d *Decoder) setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + default: + if !fastpathDecodeSetZeroTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } + } +} + +func (d *Decoder) decode(iv interface{}) { + // check nil and interfaces explicitly, + // so that type switches just have a run of constant non-interface types. + if iv == nil { + d.error(cannotDecodeIntoNilErr) + return + } + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false, true) // TODO: maybe ask to recognize ... 
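+ // Hedged note: the concrete-pointer cases below are a reflection-free fast
+ // path, e.g. d.decode(&n) for an int lands on *int and calls DecodeInt directly.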
+
+ case *string:
+ *v = d.d.DecodeString()
+ case *bool:
+ *v = d.d.DecodeBool()
+ case *int:
+ *v = int(d.d.DecodeInt(intBitsize))
+ case *int8:
+ *v = int8(d.d.DecodeInt(8))
+ case *int16:
+ *v = int16(d.d.DecodeInt(16))
+ case *int32:
+ *v = int32(d.d.DecodeInt(32))
+ case *int64:
+ *v = d.d.DecodeInt(64)
+ case *uint:
+ *v = uint(d.d.DecodeUint(uintBitsize))
+ case *uint8:
+ *v = uint8(d.d.DecodeUint(8))
+ case *uint16:
+ *v = uint16(d.d.DecodeUint(16))
+ case *uint32:
+ *v = uint32(d.d.DecodeUint(32))
+ case *uint64:
+ *v = d.d.DecodeUint(64)
+ case *float32:
+ *v = float32(d.d.DecodeFloat(true))
+ case *float64:
+ *v = d.d.DecodeFloat(false)
+ case *[]uint8:
+ *v = d.d.DecodeBytes(*v, false)
+
+ case *Raw:
+ *v = d.rawBytes()
+
+ case *interface{}:
+ d.decodeValue(reflect.ValueOf(iv).Elem(), nil, false, true) // TODO: consider recognize here
+ // d.decodeValueNotNil(reflect.ValueOf(iv).Elem())
+
+ default:
+ if !fastpathDecodeTypeSwitch(iv, d) {
+ v := reflect.ValueOf(iv)
+ v = d.ensureDecodeable(v)
+ d.decodeValue(v, nil, false, false)
+ // d.decodeValueFallback(v)
+ }
+ }
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, tryRecognized, chkAll bool) {
+ // If the stream does not contain a nil value, then we can deref to the base
+ // non-pointer value, and decode into that.
+ var rvp reflect.Value
+ var rvpValid bool
+ if rv.Kind() == reflect.Ptr {
+ rvpValid = true
+ for {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ rvp = rv
+ rv = rv.Elem()
+ if rv.Kind() != reflect.Ptr {
+ break
+ }
+ }
+ }
+
+ if useLookupRecognizedTypes && tryRecognized && isRecognizedRtid(rv2rtid(rv)) {
+ if rvpValid {
+ d.decode(rv2i(rvp))
+ return
+ } else if rv.CanAddr() {
+ d.decode(rv2i(rv.Addr()))
+ return
+ }
+ }
+
+ if fn == nil {
+ // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+ fn = d.cf.get(rv.Type(), chkAll, true) // chkAll, chkAll)
+ }
+ if fn.i.addr {
+ if rvpValid {
+ fn.fd(d, &fn.i, rvp)
+ } else if rv.CanAddr() {
+ fn.fd(d, &fn.i, rv.Addr())
+ } else {
+ fn.fd(d, &fn.i, rv)
+ }
+ } else {
+ fn.fd(d, &fn.i, rv)
+ }
+ // return rv
+}
+
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+ // NOTE: rvkencname may be a stringView, so don't pass it to another function.
+ if d.h.ErrorIfNoField {
+ if index >= 0 {
+ d.errorf("no matching struct field found when decoding stream array at index %v", index)
+ return
+ } else if rvkencname != "" {
+ d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+ return
+ }
+ }
+ d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+ if d.h.ErrorIfNoArrayExpand {
+ d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+ }
+}
+
+func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
+ switch rv.Kind() {
+ case reflect.Array:
+ return rv, true
+ case reflect.Ptr:
+ if !rv.IsNil() {
+ return rv.Elem(), true
+ }
+ case reflect.Slice, reflect.Chan, reflect.Map:
+ if !rv.IsNil() {
+ return rv, true
+ }
+ }
+ return
+}
+
+func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
+ // decode can take any reflect.Value that is inherently addressable i.e.
+ // - array + // - non-nil chan (we will SEND to it) + // - non-nil slice (we will set its elements) + // - non-nil map (we will put into it) + // - non-nil pointer (we can "update" it) + rv2, canDecode := isDecodeable(rv) + if canDecode { + return + } + if !rv.IsValid() { + d.error(cannotDecodeIntoNilErr) + return + } + if !rv.CanInterface() { + d.errorf("cannot decode into a value without an interface: %v", rv) + return + } + rvi := rv2i(rv) + d.errorf("cannot decode into value of kind: %v, type: %T, %v", rv.Kind(), rvi, rvi) + return +} + +// func (d *Decoder) chkPtrValue(rv reflect.Value) { +// // We can only decode into a non-nil pointer +// if rv.Kind() == reflect.Ptr && !rv.IsNil() { +// return +// } +// d.errNotValidPtrValue(rv) +// } + +// func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { +// if !rv.IsValid() { +// d.error(cannotDecodeIntoNilErr) +// return +// } +// if !rv.CanInterface() { +// d.errorf("cannot decode into a value without an interface: %v", rv) +// return +// } +// rvi := rv2i(rv) +// d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) +// } + +func (d *Decoder) error(err error) { + panic(err) +} + +func (d *Decoder) errorf(format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = d.r.numread() + copy(params2[1:], params) + err := fmt.Errorf("[pos %d]: "+format, params2...) + panic(err) +} + +// Possibly get an interned version of a string +// +// This should mostly be used for map keys, where the key type is string. +// This is because keys of a map/struct are typically reused across many objects. +func (d *Decoder) string(v []byte) (s string) { + if d.is == nil { + return string(v) // don't return stringView, as we need a real string here. + } + s, ok := d.is[string(v)] // no allocation here, per go implementation + if !ok { + s = string(v) // new allocation here + d.is[s] = s + } + return s +} + +// nextValueBytes returns the next value in the stream as a set of bytes. +func (d *Decoder) nextValueBytes() (bs []byte) { + d.d.uncacheRead() + d.r.track() + d.swallow() + bs = d.r.stopTrack() + return +} + +func (d *Decoder) rawBytes() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. + bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + +// -------------------------------------------------- + +// decSliceHelper assists when decoding into a slice, from a map or an array in the stream. +// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
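+//
+// Hedged illustration: a stream map such as {"a": 1} decoded into a []interface{}
+// destination yields ["a", 1]; decSliceHelperStart below reports clen as
+// ReadMapStart() * 2 for that case.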
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + if ctyp == valueTypeArray { + x.array = true + clen = dd.ReadArrayStart() + } else if ctyp == valueTypeMap { + clen = dd.ReadMapStart() * 2 + } else { + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else { + if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() + } + } +} + +func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } + } + return +} + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if unit == 0 { + return clen + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
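+ // Worked example (values assumed): unit=8 (e.g. int64 elements) gives
+ // maxlen = 256*1024/8 = 32768, while unit=128 falls to the 4*1024 floor.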
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} + +func decExpandSliceRV(s reflect.Value, st reflect.Type, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool) { + l1 := slen + num // new slice length + if l1 < slen { + panic("expandSlice: slice overflow") + } + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } + return + } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) + return +} + +func decReadFull(r io.Reader, bs []byte) (n int, err error) { + var nn int + for n < len(bs) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += nn + } + } + + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go new file mode 100644 index 00000000000..508d04fb196 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -0,0 +1,1384 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "encoding" + "fmt" + "io" + "reflect" + "sort" + "sync" +) + +const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracts writing to a byte array or to an io.Writer. 
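+// The concrete implementations in this file are ioEncWriter (streaming) and
+// bytesEncWriter (in-memory). Hedged usage sketch (handle h assumed):
+//   var out []byte
+//   codec.NewEncoderBytes(&out, h).Encode(v) // drives the bytes-backed writer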
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writen1(byte)
+ writen2(byte, byte)
+ atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+ // IsBuiltinType(rt uintptr) bool
+ EncodeBuiltin(rt uintptr, v interface{})
+ EncodeNil()
+ EncodeInt(i int64)
+ EncodeUint(i uint64)
+ EncodeBool(b bool)
+ EncodeFloat32(f float32)
+ EncodeFloat64(f float64)
+ // encodeExtPreamble(xtag byte, length int)
+ EncodeRawExt(re *RawExt, e *Encoder)
+ EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder)
+ WriteArrayStart(length int)
+ WriteArrayElem()
+ WriteArrayEnd()
+ WriteMapStart(length int)
+ WriteMapElemKey()
+ WriteMapElemValue()
+ WriteMapEnd()
+ EncodeString(c charEncoding, v string)
+ EncodeSymbol(v string)
+ EncodeStringBytes(c charEncoding, v []byte)
+
+ //TODO
+ //encBignum(f *big.Int)
+ //encStringRunes(c charEncoding, v []rune)
+
+ reset()
+ atEndOfEncode()
+}
+
+type ioEncStringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+type ioEncFlusher interface {
+ Flush() error
+}
+
+type encDriverAsis interface {
+ EncodeAsis(v []byte)
+}
+
+// type encNoSeparator struct{}
+// func (_ encNoSeparator) EncodeEnd() {}
+
+type encDriverNoopContainerWriter struct{}
+
+func (_ encDriverNoopContainerWriter) WriteArrayStart(length int) {}
+func (_ encDriverNoopContainerWriter) WriteArrayElem() {}
+func (_ encDriverNoopContainerWriter) WriteArrayEnd() {}
+func (_ encDriverNoopContainerWriter) WriteMapStart(length int) {}
+func (_ encDriverNoopContainerWriter) WriteMapElemKey() {}
+func (_ encDriverNoopContainerWriter) WriteMapElemValue() {}
+func (_ encDriverNoopContainerWriter) WriteMapEnd() {}
+func (_ encDriverNoopContainerWriter) atEndOfEncode() {}
+
+// type ioEncWriterWriter interface {
+// WriteByte(c byte) error
+// WriteString(s string) (n int, err error)
+// Write(p []byte) (n int, err error)
+// }
+
+type EncodeOptions struct {
+ // Encode a struct as an array, and not as a map
+ StructToArray bool
+
+ // Canonical representation means that encoding a value will always result in the same
+ // sequence of bytes.
+ //
+ // This only affects maps, as the iteration order for maps is random.
+ //
+ // The implementation MAY use the natural sort order for the map keys if possible:
+ //
+ // - If there is a natural sort order (ie for number, bool, string or []byte keys),
+ // then the map keys are first sorted in natural order and then written
+ // with corresponding map values to the stream.
+ // - If there is no natural sort order, then the map keys will first be
+ // encoded into []byte, and then sorted,
+ // before writing the sorted keys and the corresponding map values to the stream.
+ //
+ Canonical bool
+
+ // CheckCircularRef controls whether we check for circular references
+ // and error fast during an encode.
+ //
+ // If enabled, an error is received if a pointer to a struct
+ // references itself either directly or through one of its fields (iteratively).
+ //
+ // This is opt-in, as there may be a performance hit to checking circular references.
+ CheckCircularRef bool
+
+ // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+ // when checking if a value is empty.
+ //
+ // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+ RecursiveEmptyCheck bool
+
+ // Raw controls whether we encode Raw values.
+ // This is a "dangerous" option and must be explicitly set.
+ // If set, we blindly encode Raw values as-is, without checking
+ // if they are a correct representation of a value in that format.
+ // If unset, we error out.
+ Raw bool
+
+ // AsSymbols defines what should be encoded as symbols.
+ //
+ // Encoding as symbols can reduce the encoded size significantly.
+ //
+ // However, during encoding, each string to be encoded as a symbol must
+ // be checked to see if it has been seen before. Consequently, encoding time
+ // will increase if using symbols, because string comparisons have a clear cost.
+ //
+ // Sample values:
+ // AsSymbolNone
+ // AsSymbolAll
+ // AsSymbolMapStringKeys
+ // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+ AsSymbols AsSymbolFlag
+
+ // WriterBufferSize is the size of the buffer used when writing.
+ //
+ // if > 0, we use a smart buffer internally for performance purposes.
+ WriterBufferSize int
+}
+
+// ---------------------------------------------
+
+type simpleIoEncWriter struct {
+ io.Writer
+}
+
+// type bufIoEncWriter struct {
+// w io.Writer
+// buf []byte
+// err error
+// }
+
+// func (x *bufIoEncWriter) Write(b []byte) (n int, err error) {
+// if x.err != nil {
+// return 0, x.err
+// }
+// if cap(x.buf)-len(x.buf) >= len(b) {
+// x.buf = append(x.buf, b)
+// return len(b), nil
+// }
+// n, err = x.w.Write(x.buf)
+// if err != nil {
+// x.err = err
+// return 0, x.err
+// }
+// n, err = x.w.Write(b)
+// x.err = err
+// return
+// }
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+ w io.Writer
+ ww io.Writer
+ bw io.ByteWriter
+ sw ioEncStringWriter
+ fw ioEncFlusher
+ b [8]byte
+}
+
+func (z *ioEncWriter) WriteByte(b byte) (err error) {
+ // x.bs[0] = b
+ // _, err = x.ww.Write(x.bs[:])
+ z.b[0] = b
+ _, err = z.w.Write(z.b[:1])
+ return
+}
+
+func (z *ioEncWriter) WriteString(s string) (n int, err error) {
+ return z.w.Write(bytesView(s))
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+ // if len(bs) == 0 {
+ // return
+ // }
+ if _, err := z.ww.Write(bs); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writestr(s string) {
+ // if len(s) == 0 {
+ // return
+ // }
+ if _, err := z.sw.WriteString(s); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+ if err := z.bw.WriteByte(b); err != nil {
+ panic(err)
+ }
+}
+
+func (z *ioEncWriter) writen2(b1, b2 byte) {
+ var err error
+ if err = z.bw.WriteByte(b1); err == nil {
+ if err = z.bw.WriteByte(b2); err == nil {
+ return
+ }
+ }
+ panic(err)
+}
+
+// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
+// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
+// if _, err := z.ww.Write(z.b[:5]); err != nil {
+// panic(err)
+// }
+// }
+
+func (z *ioEncWriter) atEndOfEncode() {
+ if z.fw != nil {
+ z.fw.Flush()
+ }
+}
+
+// ----------------------------------------
+
+// bytesEncWriter implements encWriter and can write to a byte slice.
+// It is used by the Marshal function.
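+//
+// Hedged sketch of its mechanics: writes advance cursor c within b, growing via
+// growAlloc (a bytes.Buffer-style 2*cap+n policy) only when capacity runs out;
+// atEndOfEncode then publishes b[:c] through the out pointer.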
+type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeb(s []byte) { + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + oc, a := z.growNoAlloc(1) + if a { + z.growAlloc(1, oc) + } + z.b[oc] = b1 +} + +func (z *bytesEncWriter) writen2(b1, b2 byte) { + oc, a := z.growNoAlloc(2) + if a { + z.growAlloc(2, oc) + } + z.b[oc+1] = b2 + z.b[oc] = b1 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +// have a growNoalloc(n int), which can be inlined. +// if allocation is needed, then call growAlloc(n int) + +func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { + oldcursor = z.c + z.c = z.c + n + if z.c > len(z.b) { + if z.c > cap(z.b) { + allocNeeded = true + } else { + z.b = z.b[:cap(z.b)] + } + } + return +} + +func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. + // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs +} + +// --------------------------------------------- + +func (e *Encoder) builtin(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBuiltin(f.ti.rtid, rv2i(rv)) +} + +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + // rev := rv2i(rv).(RawExt) + // e.e.EncodeRawExt(&rev, e) + var re *RawExt + if rv.CanAddr() { + re = rv2i(rv.Addr()).(*RawExt) + } else { + rev := rv2i(rv).(RawExt) + re = &rev + } + e.e.EncodeRawExt(re, e) +} + +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + // if this is a struct|array and it was addressable, then pass the address directly (not the value) + if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { + rv = rv.Addr() + } + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) +} + +func (e *Encoder) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { + if indir == 0 { + v = rv2i(rv) + } else if indir == -1 { + // If a non-pointer was passed to Encode(), then that value is not addressable. + // Take addr if addressable, else copy value to an addressable value. 
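+ // Hedged example: Encode(T{}) passes a non-addressable value, so for the
+ // indir == -1 case the reflect.New copy below restores addressability and
+ // lets a pointer-receiver marshaler still be invoked.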
+ if rv.CanAddr() { + v = rv2i(rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2i(rv2) + } + } else { + for j := int8(0); j < indir; j++ { + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + } + v = rv2i(rv) + } + return v, true +} + +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { + v.(Selfer).CodecEncodeSelf(e) + } +} + +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { + bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() + e.marshal(bs, fnerr, false, c_RAW) + } +} + +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { + bs, fnerr := v.(encoding.TextMarshaler).MarshalText() + e.marshal(bs, fnerr, false, c_UTF8) + } +} + +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + if v, proceed := e.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { + bs, fnerr := v.(jsonMarshaler).MarshalJSON() + e.marshal(bs, fnerr, true, c_UTF8) + } +} + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeString(c_UTF8, rv.String()) +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() +} + +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + elemsep := e.hh.hasElemSeparators() + rtelem := ti.rt.Elem() + l := rv.Len() + if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { + switch f.seq { + case seqTypeArray: + if rv.CanAddr() { + ee.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytes(c_RAW, bs) + } + return + case seqTypeSlice: + ee.EncodeStringBytes(c_RAW, rv.Bytes()) + return + } + } + if ti.rtid == uint8SliceTypId && f.seq == seqTypeChan { + bs := e.b[:0] + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. 
+ // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + ch := rv2i(rv).(<-chan byte) + for i := 0; i < l; i++ { + bs = append(bs, <-ch) + } + ee.EncodeStringBytes(c_RAW, bs) + return + } + + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + ee.WriteMapStart(l / 2) + } else { + ee.WriteArrayStart(l) + } + + if l > 0 { + var fn *codecFn + var recognizedVtyp = useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rtelem)) + if !recognizedVtyp { + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + if rtelem.Kind() != reflect.Interface { + fn = e.cf.get(rtelem, true, true) + } + } + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + for j := 0; j < l; j++ { + if elemsep { + if ti.mbs { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } else { + ee.WriteArrayElem() + } + } + if f.seq == seqTypeChan { + if rv2, ok2 := rv.Recv(); ok2 { + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv2)) + } else { + e.encodeValue(rv2, fn, true) + } + } else { + ee.EncodeNil() // WE HAVE TO DO SOMETHING, so nil if nothing received. + } + } else { + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv.Index(j))) + } else { + e.encodeValue(rv.Index(j), fn, true) + } + } + } + } + + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.hh.hasElemSeparators() + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + if toMap { + tisfi = fti.sfi + } + ee := e.e + + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + if !elemsep { + for _, si := range tisfi { + if asSymbols { + ee.EncodeSymbol(si.encName) + } else { + ee.EncodeString(c_UTF8, si.encName) + } + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + ee.WriteMapElemKey() + if asSymbols { + ee.EncodeSymbol(si.encName) + } else { + ee.EncodeString(c_UTF8, si.encName) + } + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if !elemsep { + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteArrayEnd() + } +} + +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.hh.hasElemSeparators() + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + } + newlen := len(fti.sfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. 
+ // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. + // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + var spool *sync.Pool + var poolv interface{} + var fkvs []stringRv + if newlen <= 8 { + spool, poolv = pool.stringRv8() + fkvs = poolv.(*[8]stringRv)[:newlen] + } else if newlen <= 16 { + spool, poolv = pool.stringRv16() + fkvs = poolv.(*[16]stringRv)[:newlen] + } else if newlen <= 32 { + spool, poolv = pool.stringRv32() + fkvs = poolv.(*[32]stringRv)[:newlen] + } else if newlen <= 64 { + spool, poolv = pool.stringRv64() + fkvs = poolv.(*[64]stringRv)[:newlen] + } else if newlen <= 128 { + spool, poolv = pool.stringRv128() + fkvs = poolv.(*[128]stringRv)[:newlen] + } else { + fkvs = make([]stringRv, newlen) + } + + newlen = 0 + var kv stringRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} + for _, si := range tisfi { + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) + if toMap { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + continue + } + kv.v = si.encName + } else { + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + + if toMap { + ee.WriteMapStart(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + if !elemsep { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + e.encodeValue(kv.r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(newlen) + if !elemsep { + for j := 0; j < newlen; j++ { + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } + ee.WriteArrayEnd() + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + if spool != nil { + spool.Put(poolv) + } +} + +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.WriteMapStart(l) + elemsep := e.hh.hasElemSeparators() + if l == 0 { + ee.WriteMapEnd() + return + } + var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
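+	// e.g. for map[string]int, valFn is computed once right here, while for
+	// map[string]interface{} it stays nil and each value is resolved per
+	// entry inside the encode loop below.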
+ var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.rt.Key() + rtkey := rtkey0 + rtval0 := ti.rt.Elem() + rtval := rtval0 + rtkeyid := rt2id(rtkey0) + rtvalid := rt2id(rtval0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.cf.get(rtval, true, true) + } + mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn, asSymbols) + ee.WriteMapEnd() + return + } + + var recognizedKtyp, recognizedVtyp bool + var keyTypeIsString = rtkeyid == stringTypId + if keyTypeIsString { + asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + } else { + if useLookupRecognizedTypes { + if recognizedKtyp = isRecognizedRtidOrPtr(rtkeyid); recognizedKtyp { + goto LABEL1 + } + } + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + rtkeyid = rt2id(rtkey) + keyFn = e.cf.get(rtkey, true, true) + } + } + + // for j, lmks := 0, len(mks); j < lmks; j++ { +LABEL1: + recognizedVtyp = useLookupRecognizedTypes && isRecognizedRtidOrPtr(rtvalid) + for j := range mks { + if elemsep { + ee.WriteMapElemKey() + } + if keyTypeIsString { + if asSymbols { + ee.EncodeSymbol(mks[j].String()) + } else { + ee.EncodeString(c_UTF8, mks[j].String()) + } + } else if useLookupRecognizedTypes && recognizedKtyp { + e.encode(rv2i(mks[j])) + } else { + e.encodeValue(mks[j], keyFn, true) + } + if elemsep { + ee.WriteMapElemValue() + } + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv.MapIndex(mks[j]))) + } else { + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn, asSymbols bool) { + ee := e.e + elemsep := e.hh.hasElemSeparators() + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + // WHAT IS THIS? 
rtkeyid can never be a []uint8, per spec + // if rtkeyid == uint8SliceTypId { + // mksv := make([]bytesRv, len(mks)) + // for i, k := range mks { + // v := &mksv[i] + // v.r = k + // v.v = k.Bytes() + // } + // sort.Sort(bytesRvSlice(mksv)) + // for i := range mksv { + // if elemsep { + // ee.WriteMapElemKey() + // } + // ee.EncodeStringBytes(c_RAW, mksv[i].v) + // if elemsep { + // ee.WriteMapElemValue() + // } + // e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + // } + // return + // } + + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(mksv[i].v) + } else { + ee.EncodeString(c_UTF8, mksv[i].v) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() + } + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) + } + } +} + +// // -------------------------------------------------- 
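+
+// kMapCanonical above is what the Canonical option buys you: map keys are
+// sorted by their natural ordering before being written, so output is
+// deterministic. A minimal caller-side sketch (JsonHandle is used here only
+// because its output is easy to read; any Handle works the same way):
+//
+//	var h JsonHandle
+//	h.Canonical = true // sort map keys before encoding
+//	var b []byte
+//	NewEncoderBytes(&b, &h).MustEncode(map[string]int{"b": 2, "a": 1})
+//	// b is now `{"a":1,"b":2}`, regardless of map iteration order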
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+	e encDriver
+	// NOTE: Encoder shouldn't call its write methods directly,
+	// as the handler MAY need to do some coordination.
+	w encWriter
+
+	hh Handle
+	h  *BasicHandle
+
+	// ---- cpu cache line boundary?
+
+	wi ioEncWriter
+	wb bytesEncWriter
+	bw bufio.Writer
+
+	// cr containerStateRecv
+	as encDriverAsis
+	// ---- cpu cache line boundary?
+
+	ci  set
+	err error
+
+	b  [scratchByteArrayLen]byte
+	cf codecFner
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory-buffered writer
+// (e.g. bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.Reset(w)
+	return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.ResetBytes(out)
+	return e
+}
+
+func newEncoder(h Handle) *Encoder {
+	e := &Encoder{hh: h, h: h.getBasicHandle()}
+	e.e = h.newEncDriver(e)
+	e.as, _ = e.e.(encDriverAsis)
+	// e.cr, _ = e.e.(containerStateRecv)
+	return e
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates re-using the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+	var ok bool
+	e.wi.w = w
+	if e.h.WriterBufferSize > 0 {
+		bw := bufio.NewWriterSize(w, e.h.WriterBufferSize)
+		e.bw = *bw
+		e.wi.bw = &e.bw
+		e.wi.sw = &e.bw
+		e.wi.fw = &e.bw
+		e.wi.ww = &e.bw
+	} else {
+		if e.wi.bw, ok = w.(io.ByteWriter); !ok {
+			e.wi.bw = &e.wi
+		}
+		if e.wi.sw, ok = w.(ioEncStringWriter); !ok {
+			e.wi.sw = &e.wi
+		}
+		e.wi.fw, _ = w.(ioEncFlusher)
+		e.wi.ww = w
+	}
+	e.w = &e.wi
+	e.e.reset()
+	e.cf.reset(e.hh)
+	e.err = nil
+}
+
+func (e *Encoder) ResetBytes(out *[]byte) {
+	in := *out
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	e.wb.b, e.wb.out, e.wb.c = in, out, 0
+	e.w = &e.wb
+	e.e.reset()
+	e.cf.reset(e.hh)
+	e.err = nil
+}
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The "codec" key in the struct field's tag value is the key name,
+// followed by an optional comma and options.
+// Note that the "json" key is used in the absence of the "codec" key.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+//   - the field's tag is "-", OR
+//   - the field is empty and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+//
+// However, struct values may encode as arrays. This happens when:
+//   - the StructToArray Encode option is set, OR
+//   - the tag on the _struct field sets the "toarray" option
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
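+// For example, a sketch of a type opting in (MapBySlice is satisfied by just
+// declaring the marker method; the slice must have an even number of
+// elements, alternating key and value):
+//
+//	type kvs []interface{}
+//
+//	func (kvs) MapBySlice() {}
+//
+//	// encoding kvs{"a", 1, "b", 2} emits the map {"a": 1, "b": 2}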
+//
+// The empty values (for the omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline, except:
+//   - the struct tag specifies a replacement name (first value)
+//   - the field is of an interface type
+//
+// Examples:
+//
+//	// NOTE: 'json:' can be used as the struct tag key, in place of 'codec:' below.
+//	type MyStruct struct {
+//		_struct bool    `codec:",omitempty"`   //set omitempty for every field
+//		Field1 string   `codec:"-"`            //skip this field
+//		Field2 int      `codec:"myName"`       //Use key "myName" in encode stream
+//		Field3 int32    `codec:",omitempty"`   //use key "Field3". Omit if empty.
+//		Field4 bool     `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+//		io.Reader                              //use key "Reader".
+//		MyStruct        `codec:"my1"`          //use key "my1".
+//		MyStruct                               //inline it
+//		...
+//	}
+//
+//	type MyStruct struct {
+//		_struct bool    `codec:",toarray"`     //encode struct as an array
+//	}
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+//   - If it is a Selfer, call its CodecEncodeSelf method
+//   - If an extension is registered for it, call that extension function
+//   - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+//   - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer panicToErrs2(&e.err, &err)
+	e.MustEncode(v)
+	return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight into the code location that triggered the error.
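+//
+// A sketch of the two call styles:
+//
+//	if err := enc.Encode(v); err != nil {
+//		// handle the error
+//	}
+//	enc.MustEncode(v) // same, but panics instead of returning an error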
+func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } + e.encode(v) + e.e.atEndOfEncode() + e.w.atEndOfEncode() +} + +func (e *Encoder) encode(iv interface{}) { + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + return + } + + switch v := iv.(type) { + // case nil: + // e.e.EncodeNil() + // case Selfer: + // v.CodecEncodeSelf(e) + case Raw: + e.rawBytes(v) + case reflect.Value: + e.encodeValue(v, nil, true) + + case string: + e.e.EncodeString(c_UTF8, v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + + case []uint8: + e.e.EncodeStringBytes(c_RAW, v) + + case *string: + e.e.EncodeString(c_UTF8, *v) + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + + case *[]uint8: + e.e.EncodeStringBytes(c_RAW, *v) + + default: + if !fastpathEncodeTypeSwitch(iv, e) { + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) + } + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
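+			// remember the struct's address, so the circular-reference
+			// check below can detect a revisit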
+			sptr = rv.UnsafeAddr()
+			break TOP
+		}
+		goto TOP
+	case reflect.Interface:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+		rv = rv.Elem()
+		goto TOP
+	case reflect.Slice, reflect.Map:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+	case reflect.Invalid, reflect.Func:
+		e.e.EncodeNil()
+		return
+	}
+
+	if sptr != 0 && (&e.ci).add(sptr) {
+		e.errorf("circular reference found: # %d", sptr)
+	}
+
+	if fn == nil {
+		rt := rv.Type()
+		// TODO: calling isRecognizedRtid here is a major slowdown
+		if false && useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rt)) {
+			e.encode(rv2i(rv))
+			return
+		}
+		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+		fn = e.cf.get(rt, checkFastpath, true)
+	}
+	fn.fe(e, &fn.i, rv)
+	if sptr != 0 {
+		(&e.ci).remove(sptr)
+	}
+}
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+	if fnerr != nil {
+		panic(fnerr)
+	}
+	if bs == nil {
+		e.e.EncodeNil()
+	} else if asis {
+		e.asis(bs)
+	} else {
+		e.e.EncodeStringBytes(c, bs)
+	}
+}
+
+func (e *Encoder) asis(v []byte) {
+	if e.as == nil {
+		e.w.writeb(v)
+	} else {
+		e.as.EncodeAsis(v)
+	}
+}
+
+func (e *Encoder) rawBytes(vv Raw) {
+	v := []byte(vv)
+	if !e.h.Raw {
+		e.errorf("Raw values cannot be encoded: %v", v)
+	}
+	if e.as == nil {
+		e.w.writeb(v)
+	} else {
+		e.as.EncodeAsis(v)
+	}
+}
+
+func (e *Encoder) errorf(format string, params ...interface{}) {
+	err := fmt.Errorf(format, params...)
+	panic(err)
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 00000000000..07521543550
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,32728 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
+// ************************************************************
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+//	  - Many calls bypass reflection altogether
+//
+// Currently supported:
+//	  - slices of all builtin types (except []uint8, which is handled natively)
+//	  - maps of all builtin key types to all builtin value types
+//	    (which subsumes the symmetrical maps, e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+//	  m2 := map[string]int{}
+//	  p2 := []interface{}{m2}
+//	  // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
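+//
+// (For reference: the table below holds 271 entries, i.e. 15 slice types plus
+// the 16x16 map key/value combinations of the builtin kinds, kept sorted by
+// rtid so that fastpathA.index can binary-search it.)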
+// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +type fastpathT struct{} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [271]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, 271 // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < 271 && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + if useLookupRecognizedTypes { + recognizedRtids = append(recognizedRtids, xptr) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) + } + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) + fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) + fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) + fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) + fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) + fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) + fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) + fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) + fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) + fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) + fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) + fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) + fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) + fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) + 
fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, 
(*Decoder).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, 
(*Decoder).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, 
(*Decoder).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, 
(*Decoder).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), 
(*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, 
(*Decoder).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) + fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, 
(*Decoder).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) + fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR) + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, e) + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, e) + + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, e) + + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, e) + + case map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(v, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, e) + + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, e) + + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, e) + + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, e) + + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, e) + + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, e) + + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, e) + + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, e) + + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, e) + + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, e) + + case 
map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, e) + + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, e) + + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, e) + + case []string: + fastpathTV.EncSliceStringV(v, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, e) + + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, e) + + case map[string]string: + fastpathTV.EncMapStringStringV(v, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, e) + + case map[string]uint: + fastpathTV.EncMapStringUintV(v, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, e) + + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, e) + + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, e) + + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, e) + + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, e) + + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, e) + + case map[string]int: + fastpathTV.EncMapStringIntV(v, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, e) + + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, e) + + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, e) + + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, e) + + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, e) + + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, e) + + case map[string]float64: + fastpathTV.EncMapStringFloat64V(v, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, e) + + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, e) + + case []float32: + fastpathTV.EncSliceFloat32V(v, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, e) + + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, e) + + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, e) + + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, e) + + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, e) + + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, e) + + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, e) + case *map[float32]uint32: + 
fastpathTV.EncMapFloat32Uint32V(*v, e) + + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, e) + + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, e) + + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, e) + + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, e) + + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, e) + + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, e) + + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, e) + + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, e) + + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, e) + + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, e) + + case []float64: + fastpathTV.EncSliceFloat64V(v, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, e) + + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, e) + + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, e) + + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, e) + + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, e) + + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, e) + + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, e) + + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, e) + + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, e) + + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, e) + case *map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, e) + + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, e) + + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, e) + + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, e) + + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, e) + + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, e) + + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, e) + + 
case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, e) + + case []uint: + fastpathTV.EncSliceUintV(v, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, e) + + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, e) + + case map[uint]string: + fastpathTV.EncMapUintStringV(v, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, e) + + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, e) + + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, e) + + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, e) + + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, e) + + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, e) + + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, e) + + case map[uint]int: + fastpathTV.EncMapUintIntV(v, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, e) + + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, e) + + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, e) + + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, e) + + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, e) + + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, e) + + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, e) + + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, e) + + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, e) + + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, e) + + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, e) + + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, e) + + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, e) + case *map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(*v, e) + + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, e) + + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, e) + + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, e) + + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, e) + + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, e) + + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, e) + 
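// These cases exist so that encoding the most common builtin slice and map
// types never goes through the generic reflection path. An illustrative
// usage sketch, assuming only this package's public API (NewEncoderBytes,
// Encoder.Encode, and a concrete Handle such as CborHandle):
//
//   var buf []byte
//   enc := NewEncoderBytes(&buf, new(CborHandle))
//   if err := enc.Encode(map[string]int{"a": 1}); err != nil { /* ... */ }
//
// The Encode call above would be dispatched through this switch to
// fastpathTV.EncMapStringIntV rather than the reflection-based encoder.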
case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, e) + + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, e) + + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, e) + + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, e) + + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, e) + + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, e) + + case []uint16: + fastpathTV.EncSliceUint16V(v, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, e) + + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, e) + + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, e) + + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, e) + + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, e) + + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, e) + + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, e) + + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, e) + + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, e) + + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, e) + + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, e) + + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, e) + + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, e) + + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, e) + + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, e) + + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, e) + + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, e) + + case []uint32: + fastpathTV.EncSliceUint32V(v, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, e) + + case map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(v, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, e) + + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, e) + + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, e) + + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, e) + case *map[uint32]uint8: + 
fastpathTV.EncMapUint32Uint8V(*v, e) + + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, e) + + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, e) + + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, e) + + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, e) + + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, e) + + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, e) + + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, e) + + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, e) + + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, e) + + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, e) + + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, e) + + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, e) + + case []uint64: + fastpathTV.EncSliceUint64V(v, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, e) + + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, e) + + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, e) + + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, e) + + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, e) + + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, e) + + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, e) + + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, e) + + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, e) + + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, e) + + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, e) + + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, e) + + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, e) + case *map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, e) + + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, e) + + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, e) + case *map[uint64]float32: + 
fastpathTV.EncMapUint64Float32V(*v, e) + + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, e) + + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, e) + + case []uintptr: + fastpathTV.EncSliceUintptrV(v, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, e) + + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, e) + + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, e) + + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, e) + + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, e) + + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, e) + + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, e) + + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, e) + + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, e) + + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, e) + + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, e) + + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, e) + + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, e) + + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, e) + + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, e) + + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, e) + + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, e) + + case []int: + fastpathTV.EncSliceIntV(v, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, e) + + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, e) + + case map[int]string: + fastpathTV.EncMapIntStringV(v, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, e) + + case map[int]uint: + fastpathTV.EncMapIntUintV(v, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, e) + + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, e) + + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, e) + + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, e) + + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, e) + + case 
map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, e) + + case map[int]int: + fastpathTV.EncMapIntIntV(v, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, e) + + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, e) + + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, e) + + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, e) + + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, e) + + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, e) + + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, e) + + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, e) + + case []int8: + fastpathTV.EncSliceInt8V(v, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, e) + + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, e) + + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, e) + + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, e) + + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, e) + + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, e) + + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, e) + + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, e) + + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, e) + + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, e) + + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, e) + + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, e) + + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, e) + + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, e) + + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, e) + + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, e) + + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, e) + + case []int16: + fastpathTV.EncSliceInt16V(v, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, e) + + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, e) + + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, e) + + case map[int16]uint: + 
fastpathTV.EncMapInt16UintV(v, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, e) + + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, e) + + case map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(v, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, e) + + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, e) + + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, e) + + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, e) + + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, e) + + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, e) + + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, e) + + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, e) + + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, e) + + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, e) + + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, e) + + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, e) + + case []int32: + fastpathTV.EncSliceInt32V(v, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, e) + + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, e) + + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, e) + + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, e) + + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, e) + + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, e) + + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, e) + + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, e) + + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, e) + + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, e) + + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, e) + + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, e) + + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, e) + + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, e) + + case map[int32]float32: + 
fastpathTV.EncMapInt32Float32V(v, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, e) + + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, e) + + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, e) + + case []int64: + fastpathTV.EncSliceInt64V(v, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, e) + + case map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(v, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, e) + + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, e) + + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, e) + + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, e) + + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, e) + + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, e) + + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, e) + + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, e) + + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, e) + + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, e) + + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, e) + + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, e) + + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, e) + + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, e) + + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, e) + + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, e) + + case []bool: + fastpathTV.EncSliceBoolV(v, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, e) + + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, e) + + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, e) + + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, e) + + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, e) + + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, e) + + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, e) + + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, e) + + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, e) + 
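// The map[bool]... cases below are the last group in the switch. Any type
// not matched falls through to the default branch, which returns false so
// the caller can retry with the slower reflection-based encoder; the
// `_ = v` there works around a compiler issue (golang/go#12927), as its
// TODO notes.
//
// After the switch come the fast-path functions themselves. Each type gets
// an unexported ...R wrapper that unwraps the reflect.Value (rv2i) and then
// checks f.ti.mbs: when set (presumably because the type implements this
// package's MapBySlice marker), an even-length slice is encoded as a map of
// alternating key/value pairs via the EncAsMapSlice...V variants, which is
// why those variants reject odd lengths with "mapBySlice requires even
// slice length".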
case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, e) + + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, e) + + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, e) + + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, e) + + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, e) + + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, e) + case *map[bool]int64: + fastpathTV.EncMapBoolInt64V(*v, e) + + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, e) + + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, e) + + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, e) + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) + } else { + fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) + } +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + e.encode(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + e.encode(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) + } else { + fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) + } +} +func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeString(c_UTF8, v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeString(c_UTF8, v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) + } else { + fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) + } +} +func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if 
esep { + ee.WriteArrayElem() + } + ee.EncodeFloat32(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeFloat32(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) + } else { + fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) + } +} +func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeFloat64(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeFloat64(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) + } else { + fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) + } +} +func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) + } else { + fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) + } +} +func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) + } else { + fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), 
e) + } +} +func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) + } else { + fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) + } +} +func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeUint(uint64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) + } else { + fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) + } +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + e.encode(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + e.encode(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) + } else { + fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) + } +} +func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e 
*Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) + } else { + fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) + } +} +func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) + } else { + fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) + } +} +func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) + } else { + fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) + } +} +func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) + } else { + fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) + } +} +func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeInt(int64(v2)) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) 
/ 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeInt(int64(v2)) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) + } else { + fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) + } +} +func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { + ee.WriteArrayElem() + } + ee.EncodeBool(v2) + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + ee.EncodeBool(v2) + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = 
make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) +} +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + 
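// Canonical ordering, as used in the branch above: each interface{} key is
// encoded into the shared scratch buffer mksv (pre-sized to len(v)*16 bytes
// as a heuristic), and a bytesI entry records both the key's encoded bytes
// and the original key. Sorting bytesISlice orders entries by their encoded
// bytes, so output is deterministic regardless of map iteration order. The
// sorted key bytes are then written verbatim with e.asis, and each value is
// fetched via the remembered key (v[v2[j].i]) and encoded normally.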
e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) +} +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) 
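// For example (illustrative only): with the handle's Canonical option set,
// encoding
//   map[interface{}]int64{"b": 2, "a": 1}
// always emits the entry whose encoded key bytes sort first ("a") before
// "b", whereas without Canonical the order follows Go's randomized map
// iteration and can differ between runs.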
+ for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.asis(v2[j].v) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[v2[j].i]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + 
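// String-keyed maps add one wrinkle: when the handle's AsSymbols flag
// includes AsSymbolMapStringKeysFlag, keys are written with
// ee.EncodeSymbol(k2) instead of ee.EncodeString(c_UTF8, k2), letting
// formats that support symbol tables avoid re-emitting repeated key
// strings. Canonical ordering is also cheaper here: the keys are plain
// strings, so they are sorted directly with sort.Sort(stringSlice(v2)),
// with no scratch-buffer pre-encoding as in the interface{}-keyed encoders
// above.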
sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) 
+ } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := 
e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[string(k2)]) + } + } else { + 
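+ // non-canonical mode: range over the map directly, in Go's unspecified iteration order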
for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[string(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + } + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := 
make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, 
_ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) +} +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) +} +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, 
k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
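+ // float32 keys were widened to float64 for sorting; narrow each back to float32 before writing it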
ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, 
v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := 
range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + 
ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + 
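+ // every key/value pair has been written; close the map container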
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) +} +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(float64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[float64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) +} +func (_ fastpathT) EncMapUintUint64V(v 
map[uint]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) 
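+ // the Canonical handle option selects the sorted-key branch; the else branch is the plain range loop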
+ if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + 
i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) +} +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) 
+ if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + 
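+ // look the value up with the narrowed uint8 key before writing it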
ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) +} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) 
fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintptrR(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) 
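
The thin `fastpathEncMap...R` wrappers are the glue between the reflection-driven encoder and these concrete helpers: `rv2i(rv)` recovers the value as an `interface{}`, and a single type assertion pins down the concrete map type, so the per-element loop that follows runs with no `reflect` calls at all. A minimal sketch of that dispatch idea, with names that are illustrative rather than the package's own:

```go
package main

import (
	"fmt"
	"reflect"
)

// fastpathFor returns a non-reflective encoder for the handful of
// concrete map types that are special-cased, or nil to fall back to
// the generic reflect-based path. One assertion up front replaces a
// reflect.Value.MapIndex call per element.
func fastpathFor(rv reflect.Value) func() {
	switch m := rv.Interface().(type) { // analogue of rv2i(rv).(map[K]V)
	case map[uint16]int32:
		return func() { fmt.Println("fastpath map[uint16]int32, len", len(m)) }
	case map[string]string:
		return func() { fmt.Println("fastpath map[string]string, len", len(m)) }
	default:
		return nil
	}
}

func main() {
	v := map[uint16]int32{1: -1}
	if fn := fastpathFor(reflect.ValueOf(v)); fn != nil {
		fn()
	}
}
```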
+} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) +} +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e 
*Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + 
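
`esep` is computed once per map from `e.hh.hasElemSeparators()`: delimited formats (JSON, notably) need a hook before each key and each value to write `,` and `:`, while length-prefixed binary formats need nothing there, so the generated loops skip the call entirely. A toy illustration of the separator-aware driver style; the `elemWriter` interface and `jsonish` type below are invented for the sketch:

```go
package main

import "fmt"

// elemWriter captures the two hooks the generated loops guard with esep.
type elemWriter interface {
	WriteMapElemKey()
	WriteMapElemValue()
}

// jsonish needs real separators: a comma between entries, a colon
// after each key. A binary driver would implement these as no-ops.
type jsonish struct{ n int }

func (j *jsonish) WriteMapElemKey() {
	if j.n > 0 {
		fmt.Print(",")
	}
	j.n++
}
func (j *jsonish) WriteMapElemValue() { fmt.Print(":") }

func main() {
	var w elemWriter = &jsonish{}
	pairs := [][2]string{{"a", "1"}, {"b", "2"}}
	fmt.Print("{")
	for _, kv := range pairs {
		w.WriteMapElemKey()
		fmt.Printf("%q", kv[0])
		w.WriteMapElemValue()
		fmt.Print(kv[1])
	}
	fmt.Println("}") // {"a":1,"b":2}
}
```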
ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := 
range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) 
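
`uintSlice` here (and the `intSlice` twin used by the int-keyed helpers further down) is presumably just a `sort.Interface` wrapper over the widened `[]uint64` key slice the canonical path builds, something like the sketch below; modern code could use `sort.Slice` instead, but generated code of this vintage predates it:

```go
package main

import (
	"fmt"
	"sort"
)

// uintSlice: the likely shape of the codec package's sort helper,
// a plain sort.Interface over the widened key slice.
type uintSlice []uint64

func (p uintSlice) Len() int           { return len(p) }
func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p uintSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func main() {
	v2 := uintSlice{9, 2, 7}
	sort.Sort(v2)
	fmt.Println(v2) // [2 7 9]
}
```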
+ for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + 
ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(uint64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for k2, v2 := range v 
{ + if esep { + ee.WriteMapElemKey() + } + ee.EncodeUint(uint64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) +} +func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + 
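
Note the one deviation in the `uintptr`-keyed helpers that begin here: keys (and `uintptr` values) go through the generic `e.encode(...)` rather than `ee.EncodeUint(...)`, presumably because the driver interface only exposes the plain integer kinds. The widen-then-narrow dance around sorting is still lossless, since every key in the map fits `uintptr` by construction; a small sketch of that round trip:

```go
package main

import (
	"fmt"
	"sort"
)

// sortedKeys widens uintptr keys to uint64 to sort them, then narrows
// back to uintptr to look each value up. The round trip is exact for
// every key that was actually in the map.
func sortedKeys(v map[uintptr]string) []uintptr {
	w := make([]uint64, 0, len(v))
	for k := range v {
		w = append(w, uint64(k))
	}
	sort.Slice(w, func(i, j int) bool { return w[i] < w[j] })
	out := make([]uintptr, len(w))
	for i, k := range w {
		out[i] = uintptr(k)
	}
	return out
}

func main() {
	v := map[uintptr]string{0x30: "c", 0x10: "a", 0x20: "b"}
	for _, k := range sortedKeys(v) {
		fmt.Printf("%#x=%s ", k, v[k])
	}
	fmt.Println()
}
```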
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) +} +func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) +} +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) +} +func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + e.encode(uintptr(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + e.encode(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + 
for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep 
{ + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { 
+ ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) +} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := 
e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, 
_ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + 
if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + 
ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int8(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int8(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func 
(e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { + if v == 
nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) +} +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int16(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int16(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := 
range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { 
+ ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + 
ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() 
+} + +func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int32(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int32(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), 
e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) { + if v 
== nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + 
var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + for _, k2 := range v2 { + if 
esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(int64(k2))) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[int64(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeInt(int64(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeString(c_UTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + 
ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + e.encode(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntR(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, 
esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + for _, k2 := range v2 { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(bool(k2)) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v[bool(k2)]) + } + } else { + for k2, v2 := range v { + if esep { + ee.WriteMapElemKey() + } + ee.EncodeBool(k2) + if esep { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.DecSliceIntfV(v, false, d) + case *[]interface{}: + if v2, changed2 := fastpathTV.DecSliceIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, false, d) + case *map[interface{}]interface{}: + if v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, 
true, d); changed2 { + *v = v2 + } + + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, false, d) + case *map[interface{}]string: + if v2, changed2 := fastpathTV.DecMapIntfStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, false, d) + case *map[interface{}]uint: + if v2, changed2 := fastpathTV.DecMapIntfUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, false, d) + case *map[interface{}]uint8: + if v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, false, d) + case *map[interface{}]uint16: + if v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, false, d) + case *map[interface{}]uint32: + if v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, false, d) + case *map[interface{}]uint64: + if v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, false, d) + case *map[interface{}]uintptr: + if v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, false, d) + case *map[interface{}]int: + if v2, changed2 := fastpathTV.DecMapIntfIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, false, d) + case *map[interface{}]int8: + if v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, false, d) + case *map[interface{}]int16: + if v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, false, d) + case *map[interface{}]int32: + if v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, false, d) + case *map[interface{}]int64: + if v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, false, d) + case *map[interface{}]float32: + if v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, false, d) + case *map[interface{}]float64: + if v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, false, d) + case *map[interface{}]bool: + if v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []string: + fastpathTV.DecSliceStringV(v, false, d) + case *[]string: + if v2, changed2 := fastpathTV.DecSliceStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, false, d) + case *map[string]interface{}: + if v2, changed2 := fastpathTV.DecMapStringIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]string: + fastpathTV.DecMapStringStringV(v, false, d) + case *map[string]string: + if v2, changed2 := fastpathTV.DecMapStringStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint: + 
fastpathTV.DecMapStringUintV(v, false, d) + case *map[string]uint: + if v2, changed2 := fastpathTV.DecMapStringUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, false, d) + case *map[string]uint8: + if v2, changed2 := fastpathTV.DecMapStringUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, false, d) + case *map[string]uint16: + if v2, changed2 := fastpathTV.DecMapStringUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, false, d) + case *map[string]uint32: + if v2, changed2 := fastpathTV.DecMapStringUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, false, d) + case *map[string]uint64: + if v2, changed2 := fastpathTV.DecMapStringUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, false, d) + case *map[string]uintptr: + if v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int: + fastpathTV.DecMapStringIntV(v, false, d) + case *map[string]int: + if v2, changed2 := fastpathTV.DecMapStringIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, false, d) + case *map[string]int8: + if v2, changed2 := fastpathTV.DecMapStringInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, false, d) + case *map[string]int16: + if v2, changed2 := fastpathTV.DecMapStringInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, false, d) + case *map[string]int32: + if v2, changed2 := fastpathTV.DecMapStringInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, false, d) + case *map[string]int64: + if v2, changed2 := fastpathTV.DecMapStringInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, false, d) + case *map[string]float32: + if v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, false, d) + case *map[string]float64: + if v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, false, d) + case *map[string]bool: + if v2, changed2 := fastpathTV.DecMapStringBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []float32: + fastpathTV.DecSliceFloat32V(v, false, d) + case *[]float32: + if v2, changed2 := fastpathTV.DecSliceFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]interface{}: + fastpathTV.DecMapFloat32IntfV(v, false, d) + case *map[float32]interface{}: + if v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, false, d) + case *map[float32]string: + if v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, false, d) + case *map[float32]uint: + if v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, false, d) + case *map[float32]uint8: + if v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, true, d); changed2 { 
+ *v = v2 + } + + case map[float32]uint16: + fastpathTV.DecMapFloat32Uint16V(v, false, d) + case *map[float32]uint16: + if v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, false, d) + case *map[float32]uint32: + if v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, false, d) + case *map[float32]uint64: + if v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, false, d) + case *map[float32]uintptr: + if v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, false, d) + case *map[float32]int: + if v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, false, d) + case *map[float32]int8: + if v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, false, d) + case *map[float32]int16: + if v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, false, d) + case *map[float32]int32: + if v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, false, d) + case *map[float32]int64: + if v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, false, d) + case *map[float32]float32: + if v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, false, d) + case *map[float32]float64: + if v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, false, d) + case *map[float32]bool: + if v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []float64: + fastpathTV.DecSliceFloat64V(v, false, d) + case *[]float64: + if v2, changed2 := fastpathTV.DecSliceFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, false, d) + case *map[float64]interface{}: + if v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, false, d) + case *map[float64]string: + if v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, false, d) + case *map[float64]uint: + if v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint8: + fastpathTV.DecMapFloat64Uint8V(v, false, d) + case *map[float64]uint8: + if v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, false, d) + case *map[float64]uint16: + if v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, false, 
d) + case *map[float64]uint32: + if v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, false, d) + case *map[float64]uint64: + if v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, false, d) + case *map[float64]uintptr: + if v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, false, d) + case *map[float64]int: + if v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, false, d) + case *map[float64]int8: + if v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, false, d) + case *map[float64]int16: + if v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, false, d) + case *map[float64]int32: + if v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, false, d) + case *map[float64]int64: + if v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, false, d) + case *map[float64]float32: + if v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, false, d) + case *map[float64]float64: + if v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, false, d) + case *map[float64]bool: + if v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint: + fastpathTV.DecSliceUintV(v, false, d) + case *[]uint: + if v2, changed2 := fastpathTV.DecSliceUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, false, d) + case *map[uint]interface{}: + if v2, changed2 := fastpathTV.DecMapUintIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]string: + fastpathTV.DecMapUintStringV(v, false, d) + case *map[uint]string: + if v2, changed2 := fastpathTV.DecMapUintStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, false, d) + case *map[uint]uint: + if v2, changed2 := fastpathTV.DecMapUintUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, false, d) + case *map[uint]uint8: + if v2, changed2 := fastpathTV.DecMapUintUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, false, d) + case *map[uint]uint16: + if v2, changed2 := fastpathTV.DecMapUintUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint32: + fastpathTV.DecMapUintUint32V(v, false, d) + case *map[uint]uint32: + if v2, changed2 := fastpathTV.DecMapUintUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, false, d) + case *map[uint]uint64: + if v2, changed2 := fastpathTV.DecMapUintUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]uintptr: + 
fastpathTV.DecMapUintUintptrV(v, false, d) + case *map[uint]uintptr: + if v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int: + fastpathTV.DecMapUintIntV(v, false, d) + case *map[uint]int: + if v2, changed2 := fastpathTV.DecMapUintIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, false, d) + case *map[uint]int8: + if v2, changed2 := fastpathTV.DecMapUintInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, false, d) + case *map[uint]int16: + if v2, changed2 := fastpathTV.DecMapUintInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, false, d) + case *map[uint]int32: + if v2, changed2 := fastpathTV.DecMapUintInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, false, d) + case *map[uint]int64: + if v2, changed2 := fastpathTV.DecMapUintInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, false, d) + case *map[uint]float32: + if v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, false, d) + case *map[uint]float64: + if v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, false, d) + case *map[uint]bool: + if v2, changed2 := fastpathTV.DecMapUintBoolV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, false, d) + case *map[uint8]interface{}: + if v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, false, d) + case *map[uint8]string: + if v2, changed2 := fastpathTV.DecMapUint8StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint: + fastpathTV.DecMapUint8UintV(v, false, d) + case *map[uint8]uint: + if v2, changed2 := fastpathTV.DecMapUint8UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, false, d) + case *map[uint8]uint8: + if v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, false, d) + case *map[uint8]uint16: + if v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, false, d) + case *map[uint8]uint32: + if v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, false, d) + case *map[uint8]uint64: + if v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, false, d) + case *map[uint8]uintptr: + if v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, false, d) + case *map[uint8]int: + if v2, changed2 := fastpathTV.DecMapUint8IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, false, d) + case *map[uint8]int8: + if v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, false, d) + case *map[uint8]int16: + 
if v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, false, d) + case *map[uint8]int32: + if v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, false, d) + case *map[uint8]int64: + if v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, false, d) + case *map[uint8]float32: + if v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, false, d) + case *map[uint8]float64: + if v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, false, d) + case *map[uint8]bool: + if v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint16: + fastpathTV.DecSliceUint16V(v, false, d) + case *[]uint16: + if v2, changed2 := fastpathTV.DecSliceUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, false, d) + case *map[uint16]interface{}: + if v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, false, d) + case *map[uint16]string: + if v2, changed2 := fastpathTV.DecMapUint16StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, false, d) + case *map[uint16]uint: + if v2, changed2 := fastpathTV.DecMapUint16UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint8: + fastpathTV.DecMapUint16Uint8V(v, false, d) + case *map[uint16]uint8: + if v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, false, d) + case *map[uint16]uint16: + if v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, false, d) + case *map[uint16]uint32: + if v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, false, d) + case *map[uint16]uint64: + if v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, false, d) + case *map[uint16]uintptr: + if v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, false, d) + case *map[uint16]int: + if v2, changed2 := fastpathTV.DecMapUint16IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, false, d) + case *map[uint16]int8: + if v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, false, d) + case *map[uint16]int16: + if v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, false, d) + case *map[uint16]int32: + if v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, false, d) + case 
*map[uint16]int64: + if v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, false, d) + case *map[uint16]float32: + if v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, false, d) + case *map[uint16]float64: + if v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, false, d) + case *map[uint16]bool: + if v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint32: + fastpathTV.DecSliceUint32V(v, false, d) + case *[]uint32: + if v2, changed2 := fastpathTV.DecSliceUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, false, d) + case *map[uint32]interface{}: + if v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, false, d) + case *map[uint32]string: + if v2, changed2 := fastpathTV.DecMapUint32StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, false, d) + case *map[uint32]uint: + if v2, changed2 := fastpathTV.DecMapUint32UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, false, d) + case *map[uint32]uint8: + if v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, false, d) + case *map[uint32]uint16: + if v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, false, d) + case *map[uint32]uint32: + if v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, false, d) + case *map[uint32]uint64: + if v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, false, d) + case *map[uint32]uintptr: + if v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int: + fastpathTV.DecMapUint32IntV(v, false, d) + case *map[uint32]int: + if v2, changed2 := fastpathTV.DecMapUint32IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, false, d) + case *map[uint32]int8: + if v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, false, d) + case *map[uint32]int16: + if v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, false, d) + case *map[uint32]int32: + if v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, false, d) + case *map[uint32]int64: + if v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, false, d) + case *map[uint32]float32: + if v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]float64: + 
fastpathTV.DecMapUint32Float64V(v, false, d) + case *map[uint32]float64: + if v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, false, d) + case *map[uint32]bool: + if v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []uint64: + fastpathTV.DecSliceUint64V(v, false, d) + case *[]uint64: + if v2, changed2 := fastpathTV.DecSliceUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, false, d) + case *map[uint64]interface{}: + if v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, false, d) + case *map[uint64]string: + if v2, changed2 := fastpathTV.DecMapUint64StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, false, d) + case *map[uint64]uint: + if v2, changed2 := fastpathTV.DecMapUint64UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, false, d) + case *map[uint64]uint8: + if v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, false, d) + case *map[uint64]uint16: + if v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, false, d) + case *map[uint64]uint32: + if v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, false, d) + case *map[uint64]uint64: + if v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, false, d) + case *map[uint64]uintptr: + if v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, false, d) + case *map[uint64]int: + if v2, changed2 := fastpathTV.DecMapUint64IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, false, d) + case *map[uint64]int8: + if v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, false, d) + case *map[uint64]int16: + if v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, false, d) + case *map[uint64]int32: + if v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, false, d) + case *map[uint64]int64: + if v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, false, d) + case *map[uint64]float32: + if v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, false, d) + case *map[uint64]float64: + if v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, false, d) + case *map[uint64]bool: + if v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, true, d); changed2 { + *v = v2 + } + 
+ case []uintptr: + fastpathTV.DecSliceUintptrV(v, false, d) + case *[]uintptr: + if v2, changed2 := fastpathTV.DecSliceUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, false, d) + case *map[uintptr]interface{}: + if v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, false, d) + case *map[uintptr]string: + if v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, false, d) + case *map[uintptr]uint: + if v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, false, d) + case *map[uintptr]uint8: + if v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, false, d) + case *map[uintptr]uint16: + if v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, false, d) + case *map[uintptr]uint32: + if v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, false, d) + case *map[uintptr]uint64: + if v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, false, d) + case *map[uintptr]uintptr: + if v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, false, d) + case *map[uintptr]int: + if v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int8: + fastpathTV.DecMapUintptrInt8V(v, false, d) + case *map[uintptr]int8: + if v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, false, d) + case *map[uintptr]int16: + if v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, false, d) + case *map[uintptr]int32: + if v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, false, d) + case *map[uintptr]int64: + if v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, false, d) + case *map[uintptr]float32: + if v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, false, d) + case *map[uintptr]float64: + if v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, false, d) + case *map[uintptr]bool: + if v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int: + fastpathTV.DecSliceIntV(v, false, d) + case *[]int: + if v2, changed2 := fastpathTV.DecSliceIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, false, d) + case *map[int]interface{}: + if v2, changed2 := 
fastpathTV.DecMapIntIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]string: + fastpathTV.DecMapIntStringV(v, false, d) + case *map[int]string: + if v2, changed2 := fastpathTV.DecMapIntStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint: + fastpathTV.DecMapIntUintV(v, false, d) + case *map[int]uint: + if v2, changed2 := fastpathTV.DecMapIntUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, false, d) + case *map[int]uint8: + if v2, changed2 := fastpathTV.DecMapIntUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, false, d) + case *map[int]uint16: + if v2, changed2 := fastpathTV.DecMapIntUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, false, d) + case *map[int]uint32: + if v2, changed2 := fastpathTV.DecMapIntUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, false, d) + case *map[int]uint64: + if v2, changed2 := fastpathTV.DecMapIntUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, false, d) + case *map[int]uintptr: + if v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int: + fastpathTV.DecMapIntIntV(v, false, d) + case *map[int]int: + if v2, changed2 := fastpathTV.DecMapIntIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, false, d) + case *map[int]int8: + if v2, changed2 := fastpathTV.DecMapIntInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, false, d) + case *map[int]int16: + if v2, changed2 := fastpathTV.DecMapIntInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, false, d) + case *map[int]int32: + if v2, changed2 := fastpathTV.DecMapIntInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, false, d) + case *map[int]int64: + if v2, changed2 := fastpathTV.DecMapIntInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, false, d) + case *map[int]float32: + if v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, false, d) + case *map[int]float64: + if v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, false, d) + case *map[int]bool: + if v2, changed2 := fastpathTV.DecMapIntBoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int8: + fastpathTV.DecSliceInt8V(v, false, d) + case *[]int8: + if v2, changed2 := fastpathTV.DecSliceInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, false, d) + case *map[int8]interface{}: + if v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, false, d) + case *map[int8]string: + if v2, changed2 := fastpathTV.DecMapInt8StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, false, d) + case *map[int8]uint: + if v2, changed2 := fastpathTV.DecMapInt8UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, false, d) + case *map[int8]uint8: + if v2, changed2 := 
fastpathTV.DecMapInt8Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, false, d) + case *map[int8]uint16: + if v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, false, d) + case *map[int8]uint32: + if v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, false, d) + case *map[int8]uint64: + if v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, false, d) + case *map[int8]uintptr: + if v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, false, d) + case *map[int8]int: + if v2, changed2 := fastpathTV.DecMapInt8IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, false, d) + case *map[int8]int8: + if v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, false, d) + case *map[int8]int16: + if v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, false, d) + case *map[int8]int32: + if v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]int64: + fastpathTV.DecMapInt8Int64V(v, false, d) + case *map[int8]int64: + if v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, false, d) + case *map[int8]float32: + if v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, false, d) + case *map[int8]float64: + if v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, false, d) + case *map[int8]bool: + if v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int16: + fastpathTV.DecSliceInt16V(v, false, d) + case *[]int16: + if v2, changed2 := fastpathTV.DecSliceInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, false, d) + case *map[int16]interface{}: + if v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, false, d) + case *map[int16]string: + if v2, changed2 := fastpathTV.DecMapInt16StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, false, d) + case *map[int16]uint: + if v2, changed2 := fastpathTV.DecMapInt16UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, false, d) + case *map[int16]uint8: + if v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, false, d) + case *map[int16]uint16: + if v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, false, d) + case *map[int16]uint32: + if v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case 
map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, false, d) + case *map[int16]uint64: + if v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, false, d) + case *map[int16]uintptr: + if v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, false, d) + case *map[int16]int: + if v2, changed2 := fastpathTV.DecMapInt16IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, false, d) + case *map[int16]int8: + if v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, false, d) + case *map[int16]int16: + if v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, false, d) + case *map[int16]int32: + if v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, false, d) + case *map[int16]int64: + if v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, false, d) + case *map[int16]float32: + if v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, false, d) + case *map[int16]float64: + if v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, false, d) + case *map[int16]bool: + if v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int32: + fastpathTV.DecSliceInt32V(v, false, d) + case *[]int32: + if v2, changed2 := fastpathTV.DecSliceInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, false, d) + case *map[int32]interface{}: + if v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, false, d) + case *map[int32]string: + if v2, changed2 := fastpathTV.DecMapInt32StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, false, d) + case *map[int32]uint: + if v2, changed2 := fastpathTV.DecMapInt32UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, false, d) + case *map[int32]uint8: + if v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, false, d) + case *map[int32]uint16: + if v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, false, d) + case *map[int32]uint32: + if v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, false, d) + case *map[int32]uint64: + if v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, false, d) + case *map[int32]uintptr: + if v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int: + 
fastpathTV.DecMapInt32IntV(v, false, d) + case *map[int32]int: + if v2, changed2 := fastpathTV.DecMapInt32IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, false, d) + case *map[int32]int8: + if v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int16: + fastpathTV.DecMapInt32Int16V(v, false, d) + case *map[int32]int16: + if v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, false, d) + case *map[int32]int32: + if v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, false, d) + case *map[int32]int64: + if v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, false, d) + case *map[int32]float32: + if v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, false, d) + case *map[int32]float64: + if v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, false, d) + case *map[int32]bool: + if v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []int64: + fastpathTV.DecSliceInt64V(v, false, d) + case *[]int64: + if v2, changed2 := fastpathTV.DecSliceInt64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, false, d) + case *map[int64]interface{}: + if v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, false, d) + case *map[int64]string: + if v2, changed2 := fastpathTV.DecMapInt64StringV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, false, d) + case *map[int64]uint: + if v2, changed2 := fastpathTV.DecMapInt64UintV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, false, d) + case *map[int64]uint8: + if v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, false, d) + case *map[int64]uint16: + if v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, false, d) + case *map[int64]uint32: + if v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, false, d) + case *map[int64]uint64: + if v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, false, d) + case *map[int64]uintptr: + if v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, false, d) + case *map[int64]int: + if v2, changed2 := fastpathTV.DecMapInt64IntV(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, false, d) + case *map[int64]int8: + if v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, false, d) + case 
*map[int64]int16: + if v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int32: + fastpathTV.DecMapInt64Int32V(v, false, d) + case *map[int64]int32: + if v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, false, d) + case *map[int64]int64: + if v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, false, d) + case *map[int64]float32: + if v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, false, d) + case *map[int64]float64: + if v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, true, d); changed2 { + *v = v2 + } + + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, false, d) + case *map[int64]bool: + if v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, true, d); changed2 { + *v = v2 + } + + case []bool: + fastpathTV.DecSliceBoolV(v, false, d) + case *[]bool: + if v2, changed2 := fastpathTV.DecSliceBoolV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, false, d) + case *map[bool]interface{}: + if v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, false, d) + case *map[bool]string: + if v2, changed2 := fastpathTV.DecMapBoolStringV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, false, d) + case *map[bool]uint: + if v2, changed2 := fastpathTV.DecMapBoolUintV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, false, d) + case *map[bool]uint8: + if v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, false, d) + case *map[bool]uint16: + if v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, false, d) + case *map[bool]uint32: + if v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, false, d) + case *map[bool]uint64: + if v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, false, d) + case *map[bool]uintptr: + if v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, false, d) + case *map[bool]int: + if v2, changed2 := fastpathTV.DecMapBoolIntV(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, false, d) + case *map[bool]int8: + if v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, false, d) + case *map[bool]int16: + if v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, false, d) + case *map[bool]int32: + if v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, false, d) + case *map[bool]int64: + if v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, true, d); changed2 { + *v = v2 + } 
+ + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, false, d) + case *map[bool]float32: + if v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, false, d) + case *map[bool]float64: + if v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, true, d); changed2 { + *v = v2 + } + + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, false, d) + case *map[bool]bool: + if v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, true, d); changed2 { + *v = v2 + } + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case *[]interface{}: + *v = nil + + case *map[interface{}]interface{}: + *v = nil + + case *map[interface{}]string: + *v = nil + + case *map[interface{}]uint: + *v = nil + + case *map[interface{}]uint8: + *v = nil + + case *map[interface{}]uint16: + *v = nil + + case *map[interface{}]uint32: + *v = nil + + case *map[interface{}]uint64: + *v = nil + + case *map[interface{}]uintptr: + *v = nil + + case *map[interface{}]int: + *v = nil + + case *map[interface{}]int8: + *v = nil + + case *map[interface{}]int16: + *v = nil + + case *map[interface{}]int32: + *v = nil + + case *map[interface{}]int64: + *v = nil + + case *map[interface{}]float32: + *v = nil + + case *map[interface{}]float64: + *v = nil + + case *map[interface{}]bool: + *v = nil + + case *[]string: + *v = nil + + case *map[string]interface{}: + *v = nil + + case *map[string]string: + *v = nil + + case *map[string]uint: + *v = nil + + case *map[string]uint8: + *v = nil + + case *map[string]uint16: + *v = nil + + case *map[string]uint32: + *v = nil + + case *map[string]uint64: + *v = nil + + case *map[string]uintptr: + *v = nil + + case *map[string]int: + *v = nil + + case *map[string]int8: + *v = nil + + case *map[string]int16: + *v = nil + + case *map[string]int32: + *v = nil + + case *map[string]int64: + *v = nil + + case *map[string]float32: + *v = nil + + case *map[string]float64: + *v = nil + + case *map[string]bool: + *v = nil + + case *[]float32: + *v = nil + + case *map[float32]interface{}: + *v = nil + + case *map[float32]string: + *v = nil + + case *map[float32]uint: + *v = nil + + case *map[float32]uint8: + *v = nil + + case *map[float32]uint16: + *v = nil + + case *map[float32]uint32: + *v = nil + + case *map[float32]uint64: + *v = nil + + case *map[float32]uintptr: + *v = nil + + case *map[float32]int: + *v = nil + + case *map[float32]int8: + *v = nil + + case *map[float32]int16: + *v = nil + + case *map[float32]int32: + *v = nil + + case *map[float32]int64: + *v = nil + + case *map[float32]float32: + *v = nil + + case *map[float32]float64: + *v = nil + + case *map[float32]bool: + *v = nil + + case *[]float64: + *v = nil + + case *map[float64]interface{}: + *v = nil + + case *map[float64]string: + *v = nil + + case *map[float64]uint: + *v = nil + + case *map[float64]uint8: + *v = nil + + case *map[float64]uint16: + *v = nil + + case *map[float64]uint32: + *v = nil + + case *map[float64]uint64: + *v = nil + + case *map[float64]uintptr: + *v = nil + + case *map[float64]int: + *v = nil + + case *map[float64]int8: + *v = nil + + case *map[float64]int16: + *v = nil + + case *map[float64]int32: + *v = nil + + case *map[float64]int64: + *v = nil + + case *map[float64]float32: + *v = nil + + case *map[float64]float64: + *v = nil + + case 
*map[float64]bool: + *v = nil + + case *[]uint: + *v = nil + + case *map[uint]interface{}: + *v = nil + + case *map[uint]string: + *v = nil + + case *map[uint]uint: + *v = nil + + case *map[uint]uint8: + *v = nil + + case *map[uint]uint16: + *v = nil + + case *map[uint]uint32: + *v = nil + + case *map[uint]uint64: + *v = nil + + case *map[uint]uintptr: + *v = nil + + case *map[uint]int: + *v = nil + + case *map[uint]int8: + *v = nil + + case *map[uint]int16: + *v = nil + + case *map[uint]int32: + *v = nil + + case *map[uint]int64: + *v = nil + + case *map[uint]float32: + *v = nil + + case *map[uint]float64: + *v = nil + + case *map[uint]bool: + *v = nil + + case *map[uint8]interface{}: + *v = nil + + case *map[uint8]string: + *v = nil + + case *map[uint8]uint: + *v = nil + + case *map[uint8]uint8: + *v = nil + + case *map[uint8]uint16: + *v = nil + + case *map[uint8]uint32: + *v = nil + + case *map[uint8]uint64: + *v = nil + + case *map[uint8]uintptr: + *v = nil + + case *map[uint8]int: + *v = nil + + case *map[uint8]int8: + *v = nil + + case *map[uint8]int16: + *v = nil + + case *map[uint8]int32: + *v = nil + + case *map[uint8]int64: + *v = nil + + case *map[uint8]float32: + *v = nil + + case *map[uint8]float64: + *v = nil + + case *map[uint8]bool: + *v = nil + + case *[]uint16: + *v = nil + + case *map[uint16]interface{}: + *v = nil + + case *map[uint16]string: + *v = nil + + case *map[uint16]uint: + *v = nil + + case *map[uint16]uint8: + *v = nil + + case *map[uint16]uint16: + *v = nil + + case *map[uint16]uint32: + *v = nil + + case *map[uint16]uint64: + *v = nil + + case *map[uint16]uintptr: + *v = nil + + case *map[uint16]int: + *v = nil + + case *map[uint16]int8: + *v = nil + + case *map[uint16]int16: + *v = nil + + case *map[uint16]int32: + *v = nil + + case *map[uint16]int64: + *v = nil + + case *map[uint16]float32: + *v = nil + + case *map[uint16]float64: + *v = nil + + case *map[uint16]bool: + *v = nil + + case *[]uint32: + *v = nil + + case *map[uint32]interface{}: + *v = nil + + case *map[uint32]string: + *v = nil + + case *map[uint32]uint: + *v = nil + + case *map[uint32]uint8: + *v = nil + + case *map[uint32]uint16: + *v = nil + + case *map[uint32]uint32: + *v = nil + + case *map[uint32]uint64: + *v = nil + + case *map[uint32]uintptr: + *v = nil + + case *map[uint32]int: + *v = nil + + case *map[uint32]int8: + *v = nil + + case *map[uint32]int16: + *v = nil + + case *map[uint32]int32: + *v = nil + + case *map[uint32]int64: + *v = nil + + case *map[uint32]float32: + *v = nil + + case *map[uint32]float64: + *v = nil + + case *map[uint32]bool: + *v = nil + + case *[]uint64: + *v = nil + + case *map[uint64]interface{}: + *v = nil + + case *map[uint64]string: + *v = nil + + case *map[uint64]uint: + *v = nil + + case *map[uint64]uint8: + *v = nil + + case *map[uint64]uint16: + *v = nil + + case *map[uint64]uint32: + *v = nil + + case *map[uint64]uint64: + *v = nil + + case *map[uint64]uintptr: + *v = nil + + case *map[uint64]int: + *v = nil + + case *map[uint64]int8: + *v = nil + + case *map[uint64]int16: + *v = nil + + case *map[uint64]int32: + *v = nil + + case *map[uint64]int64: + *v = nil + + case *map[uint64]float32: + *v = nil + + case *map[uint64]float64: + *v = nil + + case *map[uint64]bool: + *v = nil + + case *[]uintptr: + *v = nil + + case *map[uintptr]interface{}: + *v = nil + + case *map[uintptr]string: + *v = nil + + case *map[uintptr]uint: + *v = nil + + case *map[uintptr]uint8: + *v = nil + + case *map[uintptr]uint16: + *v = nil + + case *map[uintptr]uint32: + *v = 
nil + + case *map[uintptr]uint64: + *v = nil + + case *map[uintptr]uintptr: + *v = nil + + case *map[uintptr]int: + *v = nil + + case *map[uintptr]int8: + *v = nil + + case *map[uintptr]int16: + *v = nil + + case *map[uintptr]int32: + *v = nil + + case *map[uintptr]int64: + *v = nil + + case *map[uintptr]float32: + *v = nil + + case *map[uintptr]float64: + *v = nil + + case *map[uintptr]bool: + *v = nil + + case *[]int: + *v = nil + + case *map[int]interface{}: + *v = nil + + case *map[int]string: + *v = nil + + case *map[int]uint: + *v = nil + + case *map[int]uint8: + *v = nil + + case *map[int]uint16: + *v = nil + + case *map[int]uint32: + *v = nil + + case *map[int]uint64: + *v = nil + + case *map[int]uintptr: + *v = nil + + case *map[int]int: + *v = nil + + case *map[int]int8: + *v = nil + + case *map[int]int16: + *v = nil + + case *map[int]int32: + *v = nil + + case *map[int]int64: + *v = nil + + case *map[int]float32: + *v = nil + + case *map[int]float64: + *v = nil + + case *map[int]bool: + *v = nil + + case *[]int8: + *v = nil + + case *map[int8]interface{}: + *v = nil + + case *map[int8]string: + *v = nil + + case *map[int8]uint: + *v = nil + + case *map[int8]uint8: + *v = nil + + case *map[int8]uint16: + *v = nil + + case *map[int8]uint32: + *v = nil + + case *map[int8]uint64: + *v = nil + + case *map[int8]uintptr: + *v = nil + + case *map[int8]int: + *v = nil + + case *map[int8]int8: + *v = nil + + case *map[int8]int16: + *v = nil + + case *map[int8]int32: + *v = nil + + case *map[int8]int64: + *v = nil + + case *map[int8]float32: + *v = nil + + case *map[int8]float64: + *v = nil + + case *map[int8]bool: + *v = nil + + case *[]int16: + *v = nil + + case *map[int16]interface{}: + *v = nil + + case *map[int16]string: + *v = nil + + case *map[int16]uint: + *v = nil + + case *map[int16]uint8: + *v = nil + + case *map[int16]uint16: + *v = nil + + case *map[int16]uint32: + *v = nil + + case *map[int16]uint64: + *v = nil + + case *map[int16]uintptr: + *v = nil + + case *map[int16]int: + *v = nil + + case *map[int16]int8: + *v = nil + + case *map[int16]int16: + *v = nil + + case *map[int16]int32: + *v = nil + + case *map[int16]int64: + *v = nil + + case *map[int16]float32: + *v = nil + + case *map[int16]float64: + *v = nil + + case *map[int16]bool: + *v = nil + + case *[]int32: + *v = nil + + case *map[int32]interface{}: + *v = nil + + case *map[int32]string: + *v = nil + + case *map[int32]uint: + *v = nil + + case *map[int32]uint8: + *v = nil + + case *map[int32]uint16: + *v = nil + + case *map[int32]uint32: + *v = nil + + case *map[int32]uint64: + *v = nil + + case *map[int32]uintptr: + *v = nil + + case *map[int32]int: + *v = nil + + case *map[int32]int8: + *v = nil + + case *map[int32]int16: + *v = nil + + case *map[int32]int32: + *v = nil + + case *map[int32]int64: + *v = nil + + case *map[int32]float32: + *v = nil + + case *map[int32]float64: + *v = nil + + case *map[int32]bool: + *v = nil + + case *[]int64: + *v = nil + + case *map[int64]interface{}: + *v = nil + + case *map[int64]string: + *v = nil + + case *map[int64]uint: + *v = nil + + case *map[int64]uint8: + *v = nil + + case *map[int64]uint16: + *v = nil + + case *map[int64]uint32: + *v = nil + + case *map[int64]uint64: + *v = nil + + case *map[int64]uintptr: + *v = nil + + case *map[int64]int: + *v = nil + + case *map[int64]int8: + *v = nil + + case *map[int64]int16: + *v = nil + + case *map[int64]int32: + *v = nil + + case *map[int64]int64: + *v = nil + + case *map[int64]float32: + *v = nil + + case *map[int64]float64: + 
*v = nil + + case *map[int64]bool: + *v = nil + + case *[]bool: + *v = nil + + case *map[bool]interface{}: + *v = nil + + case *map[bool]string: + *v = nil + + case *map[bool]uint: + *v = nil + + case *map[bool]uint8: + *v = nil + + case *map[bool]uint16: + *v = nil + + case *map[bool]uint32: + *v = nil + + case *map[bool]uint64: + *v = nil + + case *map[bool]uintptr: + *v = nil + + case *map[bool]int: + *v = nil + + case *map[bool]int8: + *v = nil + + case *map[bool]int16: + *v = nil + + case *map[bool]int32: + *v = nil + + case *map[bool]int64: + *v = nil + + case *map[bool]float32: + *v = nil + + case *map[bool]float64: + *v = nil + + case *map[bool]bool: + *v = nil + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions + +func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]interface{}) + if v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceIntfV(rv2i(rv).([]interface{}), !array, d) + } +} +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { + if v, changed := f.DecSliceIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]interface{}, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]interface{}, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + d.decode(&v[j]) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]interface{}, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]string) + if v, changed := fastpathTV.DecSliceStringV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceStringV(rv2i(rv).([]string), !array, d) + } +} +func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { + if v, changed := f.DecSliceStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if 
canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]string, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]string, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeString() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]string, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]float32) + if v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceFloat32V(rv2i(rv).([]float32), !array, d) + } +} +func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { + if v, changed := f.DecSliceFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]float32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = float32(dd.DecodeFloat(true)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]float32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]float64) + if v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d); changed { + *vp = v + } + } 
else { + fastpathTV.DecSliceFloat64V(rv2i(rv).([]float64), !array, d) + } +} +func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { + if v, changed := f.DecSliceFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]float64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeFloat(false) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]float64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint) + if v, changed := fastpathTV.DecSliceUintV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUintV(rv2i(rv).([]uint), !array, d) + } +} +func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { + if v, changed := f.DecSliceUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else 
if j == 0 && v == nil { + v = make([]uint, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint16) + if v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUint16V(rv2i(rv).([]uint16), !array, d) + } +} +func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { + if v, changed := f.DecSliceUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint16, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]uint16, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint16(dd.DecodeUint(16)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint16, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint32) + if v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUint32V(rv2i(rv).([]uint32), !array, d) + } +} +func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { + if v, changed := f.DecSliceUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]uint32, xlen) + changed = true + } + // if indefinite, etc, then 
expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint32(dd.DecodeUint(32)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint64) + if v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUint64V(rv2i(rv).([]uint64), !array, d) + } +} +func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { + if v, changed := f.DecSliceUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeUint(64) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uintptr) + if v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceUintptrV(rv2i(rv).([]uintptr), !array, d) + } +} +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { + if v, changed := f.DecSliceUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uintptr, 
xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uintptr, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uintptr, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int) + if v, changed := fastpathTV.DecSliceIntV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceIntV(rv2i(rv).([]int), !array, d) + } +} +func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) { + if v, changed := f.DecSliceIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]int, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int(dd.DecodeInt(intBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int8) + if v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt8V(rv2i(rv).([]int8), !array, d) + } +} +func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) { + if v, changed := f.DecSliceInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil 
{ + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int8, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]int8, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int8(dd.DecodeInt(8)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int8, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int16) + if v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt16V(rv2i(rv).([]int16), !array, d) + } +} +func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) { + if v, changed := f.DecSliceInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int16, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]int16, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int16(dd.DecodeInt(16)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int16, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int32) + if v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt32V(rv2i(rv).([]int32), !array, d) + } +} 
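(Aside: each generated fast path in this vendored file comes as a trio — an `...R` method wired into the decoder's function table, like `fastpathDecSliceInt32R` just above; an `...X` pointer convenience; and a `...V` worker that takes the current value plus a `canChange` flag and reports whether it had to reallocate. A minimal sketch of how these paths are reached through the package's public API follows; the `JsonHandle` choice and the sample data are illustrative assumptions, not part of this diff.)

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // the vendored package this generated file belongs to
)

func main() {
	var h codec.JsonHandle // any codec.Handle works; JSON is just easy to eyeball

	// Encode a []int32 so there are bytes to decode.
	var buf []byte
	codec.NewEncoderBytes(&buf, &h).MustEncode([]int32{1, 2, 3})

	// Decoding into *[]int32 dispatches to the generated fast path for that
	// type (fastpathDecSliceInt32R above), bypassing the generic
	// reflection-based decode path.
	var out []int32
	codec.NewDecoderBytes(buf, &h).MustDecode(&out)
	fmt.Println(out) // [1 2 3]
}

(When the destination is a fixed-size array rather than a slice, the `...V` worker is called with `canChange=false`, so it cannot grow the destination: per the code above, it reports the overflow via `d.arrayCannotExpand` and swallows the extra elements with `d.swallow()`.)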
+func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) { + if v, changed := f.DecSliceInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]int32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int32(dd.DecodeInt(32)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int64) + if v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceInt64V(rv2i(rv).([]int64), !array, d) + } +} +func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) { + if v, changed := f.DecSliceInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]int64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeInt(64) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int64, 0) + changed = true + } + } + slh.End() + return v, 
changed +} + +func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]bool) + if v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.DecSliceBoolV(rv2i(rv).([]bool), !array, d) + } +} +func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) { + if v, changed := f.DecSliceBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) { + dd := d.d + + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]bool, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]bool, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeBool() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]bool, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]interface{}) + if v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d) +} +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) { + if v, changed := f.DecMapIntfIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool, + d *Decoder) (_ map[interface{}]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk interface{} + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]string) + if v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d) +} +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) { + if v, changed := f.DecMapIntfStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, + d *Decoder) (_ map[interface{}]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint) + if v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d) +} +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) { + if v, changed := f.DecMapIntfUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, + d *Decoder) (_ map[interface{}]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint8) + if v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d) +} +func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) { + if v, changed := f.DecMapIntfUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, + d *Decoder) (_ map[interface{}]uint8, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint16) + if v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d) +} +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) { + if v, changed := f.DecMapIntfUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, + d *Decoder) (_ map[interface{}]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint32) + if v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d) +} +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) { + if v, changed := f.DecMapIntfUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, + d *Decoder) (_ map[interface{}]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint64) + if v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d) +} +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) { + if v, changed := f.DecMapIntfUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, + d *Decoder) (_ map[interface{}]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uintptr) + if v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d) +} +func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) { + if v, changed := f.DecMapIntfUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, + d *Decoder) (_ map[interface{}]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int) + if v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d) +} +func (f fastpathT) DecMapIntfIntX(vp 
*map[interface{}]int, d *Decoder) { + if v, changed := f.DecMapIntfIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, + d *Decoder) (_ map[interface{}]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int8) + if v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d) +} +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) { + if v, changed := f.DecMapIntfInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, + d *Decoder) (_ map[interface{}]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int16) + if v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d) +} +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) { + if v, changed := f.DecMapIntfInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, + d *Decoder) (_ map[interface{}]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int32) + if v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d) +} +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) { + if v, changed := f.DecMapIntfInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, + d *Decoder) (_ map[interface{}]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int64) + if v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d) +} +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) { + if v, changed := f.DecMapIntfInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, + d *Decoder) (_ map[interface{}]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float32) + if v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, 
d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d) +} +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) { + if v, changed := f.DecMapIntfFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, + d *Decoder) (_ map[interface{}]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float64) + if v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d) +} +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) { + if v, changed := f.DecMapIntfFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, + d *Decoder) (_ map[interface{}]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]bool) + if v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d) +} +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) { + if v, changed := f.DecMapIntfBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, + d *Decoder) (_ map[interface{}]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = 
make(map[interface{}]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk interface{} + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]interface{}) + if v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d) +} +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) { + if v, changed := f.DecMapStringIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, + d *Decoder) (_ map[string]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk string + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]string) + if v, changed := fastpathTV.DecMapStringStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d) +} +func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) { + if v, changed := f.DecMapStringStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, + d *Decoder) (_ map[string]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint) + if v, changed := fastpathTV.DecMapStringUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d) +} +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) { + if v, changed := f.DecMapStringUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, + d *Decoder) (_ map[string]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint8) + if v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d) +} +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) { + if v, changed := f.DecMapStringUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, + d *Decoder) (_ map[string]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint16) + if v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d) +} +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) { + if v, changed := f.DecMapStringUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, + d *Decoder) (_ map[string]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]uint16, 
xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint32) + if v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d) +} +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) { + if v, changed := f.DecMapStringUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, + d *Decoder) (_ map[string]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint64) + if v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d) +} +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) { + if v, changed := f.DecMapStringUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, + d *Decoder) (_ map[string]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uintptr) + if v, changed := fastpathTV.DecMapStringUintptrV(*vp, 
true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d) +} +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) { + if v, changed := f.DecMapStringUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, + d *Decoder) (_ map[string]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int) + if v, changed := fastpathTV.DecMapStringIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { + if v, changed := f.DecMapStringIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int8) + if v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { + if v, changed := f.DecMapStringInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, + d *Decoder) (_ map[string]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int16) + if v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { + if v, changed := f.DecMapStringInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int32) + if v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) +} +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { + if v, changed := f.DecMapStringInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int64) + if v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { + if v, changed := 
f.DecMapStringInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float32) + if v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { + if v, changed := f.DecMapStringFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float64) + if v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { + if v, changed := f.DecMapStringFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]bool) + if v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { + if v, changed := f.DecMapStringBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk string + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]interface{}) + if v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { + if v, changed := f.DecMapFloat32IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]string) + if v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { + if v, changed := f.DecMapFloat32StringV(*vp, true, d); changed 
{ + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint) + if v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { + if v, changed := f.DecMapFloat32UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint8) + if v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { + if v, changed := f.DecMapFloat32Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + 
delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint16) + if v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { + if v, changed := f.DecMapFloat32Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint32) + if v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { + if v, changed := f.DecMapFloat32Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint64) + if v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { + if v, changed := f.DecMapFloat32Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, 
canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uintptr) + if v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { + if v, changed := f.DecMapFloat32UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int) + if v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { + if v, changed := f.DecMapFloat32IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = 
int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int8) + if v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { + if v, changed := f.DecMapFloat32Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int16) + if v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { + if v, changed := f.DecMapFloat32Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int32) + if v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { + if v, changed := f.DecMapFloat32Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + 
+ containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int64) + if v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { + if v, changed := f.DecMapFloat32Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float32) + if v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) +} +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) { + if v, changed := f.DecMapFloat32Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + 
+func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float64) + if v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { + if v, changed := f.DecMapFloat32Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]bool) + if v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { + if v, changed := f.DecMapFloat32BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]interface{}) + if v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { + if v, changed := f.DecMapFloat64IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil 
{ + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]string) + if v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { + if v, changed := f.DecMapFloat64StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint) + if v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { + if v, changed := f.DecMapFloat64UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func 
(d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint8) + if v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { + if v, changed := f.DecMapFloat64Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint16) + if v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { + if v, changed := f.DecMapFloat64Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint32) + if v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { + if v, changed := f.DecMapFloat64Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint64) + if v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { + if v, changed := f.DecMapFloat64Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uintptr) + if v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { + if v, changed := f.DecMapFloat64UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv 
reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int) + if v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { + if v, changed := f.DecMapFloat64IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int8) + if v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { + if v, changed := f.DecMapFloat64Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int16) + if v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { + if v, changed := f.DecMapFloat64Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int32) + if v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { + if v, changed := f.DecMapFloat64Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int64) + if v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { + if v, changed := f.DecMapFloat64Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float32) + if v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d); changed { + *vp = v + } + return + } + 
fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { + if v, changed := f.DecMapFloat64Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float64) + if v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { + if v, changed := f.DecMapFloat64Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]bool) + if v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { + if v, changed := f.DecMapFloat64BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk float64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen 
&& j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]interface{}) + if v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { + if v, changed := f.DecMapUintIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]string) + if v, changed := fastpathTV.DecMapUintStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { + if v, changed := f.DecMapUintStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint) + if v, changed := fastpathTV.DecMapUintUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, 
d) +} +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) { + if v, changed := f.DecMapUintUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint8) + if v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { + if v, changed := f.DecMapUintUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint16) + if v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { + if v, changed := f.DecMapUintUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + 
dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint32) + if v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) +} +func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { + if v, changed := f.DecMapUintUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, + d *Decoder) (_ map[uint]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint64) + if v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { + if v, changed := f.DecMapUintUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uintptr) + if v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) +} +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { + if v, changed := f.DecMapUintUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange 
bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int) + if v, changed := fastpathTV.DecMapUintIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { + if v, changed := f.DecMapUintIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int8) + if v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { + if v, changed := f.DecMapUintInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int16) + if v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) +} +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { + if v, changed := f.DecMapUintInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int32) + if v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { + if v, changed := f.DecMapUintInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int64) + if v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) +} +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { + if v, changed := f.DecMapUintInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float32) + if v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { + if v, changed := f.DecMapUintFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float64) + if v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { + if v, changed := f.DecMapUintFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]bool) + if v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d); changed { + *vp = v + } + return + } + 
fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { + if v, changed := f.DecMapUintBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]interface{}) + if v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { + if v, changed := f.DecMapUint8IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]string) + if v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { + if v, changed := f.DecMapUint8StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv string + hasLen := containerLen > 0 
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint) + if v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { + if v, changed := f.DecMapUint8UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint8) + if v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { + if v, changed := f.DecMapUint8Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint16) + if v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { + if v, 
changed := f.DecMapUint8Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint32) + if v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) { + if v, changed := f.DecMapUint8Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint64) + if v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d) +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { + if v, changed := f.DecMapUint8Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, + d *Decoder) (_ map[uint8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uintptr) + if v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { + if v, changed := f.DecMapUint8UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int) + if v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { + if v, changed := f.DecMapUint8IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk uint8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int8) + if v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { + if v, changed := f.DecMapUint8Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd, esep := d.d, 
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(dd.DecodeInt(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int16)
+		if v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d)
+}
+func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) {
+	if v, changed := f.DecMapUint8Int16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool,
+	d *Decoder) (_ map[uint8]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint8]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(dd.DecodeInt(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int32)
+		if v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d)
+}
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) {
+	if v, changed := f.DecMapUint8Int32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool,
+	d *Decoder) (_ map[uint8]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(dd.DecodeInt(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]int64)
+		if v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d)
+}
+func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) {
+	if v, changed := f.DecMapUint8Int64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool,
+	d *Decoder) (_ map[uint8]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]float32)
+		if v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d)
+}
+func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) {
+	if v, changed := f.DecMapUint8Float32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool,
+	d *Decoder) (_ map[uint8]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint8]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(dd.DecodeFloat(true))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]float64)
+		if v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d)
+}
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) {
+	if v, changed := f.DecMapUint8Float64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool,
+	d *Decoder) (_ map[uint8]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint8]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat(false)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint8]bool)
+		if v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d)
+}
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) {
+	if v, changed := f.DecMapUint8BoolV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool,
+	d *Decoder) (_ map[uint8]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 2)
+		v = make(map[uint8]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint8
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint8(dd.DecodeUint(8))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]interface{})
+		if v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d)
+}
+func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) {
+	if v, changed := f.DecMapUint16IntfV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool,
+	d *Decoder) (_ map[uint16]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[uint16]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint16
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]string)
+		if v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d)
+}
+func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) {
+	if v, changed := f.DecMapUint16StringV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool,
+	d *Decoder) (_ map[uint16]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 18)
+		v = make(map[uint16]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint)
+		if v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d)
+}
+func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) {
+	if v, changed := f.DecMapUint16UintV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool,
+	d *Decoder) (_ map[uint16]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint8)
+		if v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d)
+}
+func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) {
+	if v, changed := f.DecMapUint16Uint8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool,
+	d *Decoder) (_ map[uint16]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(dd.DecodeUint(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint16)
+		if v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d)
+}
+func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) {
+	if v, changed := f.DecMapUint16Uint16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool,
+	d *Decoder) (_ map[uint16]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[uint16]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(dd.DecodeUint(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint32)
+		if v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d)
+}
+func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) {
+	if v, changed := f.DecMapUint16Uint32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool,
+	d *Decoder) (_ map[uint16]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(dd.DecodeUint(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uint64)
+		if v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d)
+}
+func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) {
+	if v, changed := f.DecMapUint16Uint64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool,
+	d *Decoder) (_ map[uint16]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]uintptr)
+		if v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d)
+}
+func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) {
+	if v, changed := f.DecMapUint16UintptrV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool,
+	d *Decoder) (_ map[uint16]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int)
+		if v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d)
+}
+func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) {
+	if v, changed := f.DecMapUint16IntV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool,
+	d *Decoder) (_ map[uint16]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(dd.DecodeInt(intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int8)
+		if v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d)
+}
+func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) {
+	if v, changed := f.DecMapUint16Int8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool,
+	d *Decoder) (_ map[uint16]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(dd.DecodeInt(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int16)
+		if v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d)
+}
+func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) {
+	if v, changed := f.DecMapUint16Int16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool,
+	d *Decoder) (_ map[uint16]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 4)
+		v = make(map[uint16]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(dd.DecodeInt(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int32)
+		if v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d)
+}
+func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) {
+	if v, changed := f.DecMapUint16Int32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool,
+	d *Decoder) (_ map[uint16]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(dd.DecodeInt(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]int64)
+		if v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d)
+}
+func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) {
+	if v, changed := f.DecMapUint16Int64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool,
+	d *Decoder) (_ map[uint16]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]float32)
+		if v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d)
+}
+func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) {
+	if v, changed := f.DecMapUint16Float32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool,
+	d *Decoder) (_ map[uint16]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint16]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(dd.DecodeFloat(true))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]float64)
+		if v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d)
+}
+func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) {
+	if v, changed := f.DecMapUint16Float64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool,
+	d *Decoder) (_ map[uint16]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint16]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat(false)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint16]bool)
+		if v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d)
+}
+func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) {
+	if v, changed := f.DecMapUint16BoolV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool,
+	d *Decoder) (_ map[uint16]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 3)
+		v = make(map[uint16]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint16
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint16(dd.DecodeUint(16))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]interface{})
+		if v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d)
+}
+func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) {
+	if v, changed := f.DecMapUint32IntfV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool,
+	d *Decoder) (_ map[uint32]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[uint32]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint32
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]string)
+		if v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d)
+}
+func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) {
+	if v, changed := f.DecMapUint32StringV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool,
+	d *Decoder) (_ map[uint32]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 20)
+		v = make(map[uint32]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint)
+		if v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d)
+}
+func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) {
+	if v, changed := f.DecMapUint32UintV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool,
+	d *Decoder) (_ map[uint32]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint8)
+		if v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d)
+}
+func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) {
+	if v, changed := f.DecMapUint32Uint8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool,
+	d *Decoder) (_ map[uint32]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint32]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(dd.DecodeUint(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint16)
+		if v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d)
+}
+func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) {
+	if v, changed := f.DecMapUint32Uint16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool,
+	d *Decoder) (_ map[uint32]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint32]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(dd.DecodeUint(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint32)
+		if v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d)
+}
+func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) {
+	if v, changed := f.DecMapUint32Uint32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool,
+	d *Decoder) (_ map[uint32]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[uint32]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(dd.DecodeUint(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uint64)
+		if v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d)
+}
+func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) {
+	if v, changed := f.DecMapUint32Uint64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool,
+	d *Decoder) (_ map[uint32]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]uintptr)
+		if v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d)
+}
+func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) {
+	if v, changed := f.DecMapUint32UintptrV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool,
+	d *Decoder) (_ map[uint32]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int)
+		if v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d)
+}
+func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) {
+	if v, changed := f.DecMapUint32IntV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool,
+	d *Decoder) (_ map[uint32]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(dd.DecodeInt(intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int8)
+		if v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d)
+}
+func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) {
+	if v, changed := f.DecMapUint32Int8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool,
+	d *Decoder) (_ map[uint32]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint32]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(dd.DecodeInt(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int16)
+		if v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d)
+}
+func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) {
+	if v, changed := f.DecMapUint32Int16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool,
+	d *Decoder) (_ map[uint32]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 6)
+		v = make(map[uint32]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(dd.DecodeInt(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int32)
+		if v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d)
+}
+func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) {
+	if v, changed := f.DecMapUint32Int32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool,
+	d *Decoder) (_ map[uint32]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[uint32]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(dd.DecodeInt(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]int64)
+		if v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d)
+}
+func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) {
+	if v, changed := f.DecMapUint32Int64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool,
+	d *Decoder) (_ map[uint32]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]float32)
+		if v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d)
+}
+func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) {
+	if v, changed := f.DecMapUint32Float32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool,
+	d *Decoder) (_ map[uint32]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 8)
+		v = make(map[uint32]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(dd.DecodeFloat(true))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]float64)
+		if v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d)
+}
+func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) {
+	if v, changed := f.DecMapUint32Float64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool,
+	d *Decoder) (_ map[uint32]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint32]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat(false)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint32]bool)
+		if v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d)
+}
+func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) {
+	if v, changed := f.DecMapUint32BoolV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool,
+	d *Decoder) (_ map[uint32]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 5)
+		v = make(map[uint32]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint32
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uint32(dd.DecodeUint(32))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]interface{})
+		if v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d)
+}
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) {
+	if v, changed := f.DecMapUint64IntfV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool,
+	d *Decoder) (_ map[uint64]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint64]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint64
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]string)
+		if v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d)
+}
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) {
+	if v, changed := f.DecMapUint64StringV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool,
+	d *Decoder) (_ map[uint64]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uint64]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint)
+		if v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d)
+}
+func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) {
+	if v, changed := f.DecMapUint64UintV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool,
+	d *Decoder) (_ map[uint64]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint8)
+		if v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d)
+}
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) {
+	if v, changed := f.DecMapUint64Uint8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool,
+	d *Decoder) (_ map[uint64]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint64]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(dd.DecodeUint(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint16)
+		if v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d)
+}
+func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) {
+	if v, changed := f.DecMapUint64Uint16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool,
+	d *Decoder) (_ map[uint64]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint64]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(dd.DecodeUint(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint32)
+		if v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d)
+}
+func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) {
+	if v, changed := f.DecMapUint64Uint32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool,
+	d *Decoder) (_ map[uint64]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint64]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(dd.DecodeUint(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uint64)
+		if v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d)
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) {
+	if v, changed := f.DecMapUint64Uint64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool,
+	d *Decoder) (_ map[uint64]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]uintptr)
+		if v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d)
+}
+func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) {
+	if v, changed := f.DecMapUint64UintptrV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool,
+	d *Decoder) (_ map[uint64]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int)
+		if v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d)
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) {
+	if v, changed := f.DecMapUint64IntV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool,
+	d *Decoder) (_ map[uint64]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(dd.DecodeInt(intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int8)
+		if v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d)
+}
+func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) {
+	if v, changed := f.DecMapUint64Int8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool,
+	d *Decoder) (_ map[uint64]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint64]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(dd.DecodeInt(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int16)
+		if v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d)
+}
+func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) {
+	if v, changed := f.DecMapUint64Int16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool,
+	d *Decoder) (_ map[uint64]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uint64]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(dd.DecodeInt(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int32)
+		if v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d)
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) {
+	if v, changed := f.DecMapUint64Int32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool,
+	d *Decoder) (_ map[uint64]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint64]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(dd.DecodeInt(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]int64)
+		if v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d)
+}
+func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) {
+	if v, changed := f.DecMapUint64Int64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool,
+	d *Decoder) (_ map[uint64]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]float32)
+		if v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d)
+}
+func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) {
+	if v, changed := f.DecMapUint64Float32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool,
+	d *Decoder) (_ map[uint64]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uint64]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(dd.DecodeFloat(true))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]float64)
+		if v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d)
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) {
+	if v, changed := f.DecMapUint64Float64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool,
+	d *Decoder) (_ map[uint64]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uint64]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat(false)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uint64]bool)
+		if v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d)
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) {
+	if v, changed := f.DecMapUint64BoolV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool,
+	d *Decoder) (_ map[uint64]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uint64]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uint64
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = dd.DecodeUint(64)
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]interface{})
+		if v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d)
+}
+func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) {
+	if v, changed := f.DecMapUintptrIntfV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool,
+	d *Decoder) (_ map[uintptr]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uintptr]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uintptr
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]string)
+		if v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d)
+}
+func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) {
+	if v, changed := f.DecMapUintptrStringV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool,
+	d *Decoder) (_ map[uintptr]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[uintptr]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint)
+		if v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d)
+}
+func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) {
+	if v, changed := f.DecMapUintptrUintV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool,
+	d *Decoder) (_ map[uintptr]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint8)
+		if v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d)
+}
+func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) {
+	if v, changed := f.DecMapUintptrUint8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool,
+	d *Decoder) (_ map[uintptr]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(dd.DecodeUint(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint16)
+		if v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d)
+}
+func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) {
+	if v, changed := f.DecMapUintptrUint16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool,
+	d *Decoder) (_ map[uintptr]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uintptr]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(dd.DecodeUint(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint32)
+		if v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d)
+}
+func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) {
+	if v, changed := f.DecMapUintptrUint32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool,
+	d *Decoder) (_ map[uintptr]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(dd.DecodeUint(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uint64)
+		if v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d)
+}
+func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) {
+	if v, changed := f.DecMapUintptrUint64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool,
+	d *Decoder) (_ map[uintptr]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]uintptr)
+		if v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d)
+}
+func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) {
+	if v, changed := f.DecMapUintptrUintptrV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool,
+	d *Decoder) (_ map[uintptr]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int)
+		if v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d)
+}
+func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) {
+	if v, changed := f.DecMapUintptrIntV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool,
+	d *Decoder) (_ map[uintptr]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(dd.DecodeInt(intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int8)
+		if v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d)
+}
+func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) {
+	if v, changed := f.DecMapUintptrInt8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool,
+	d *Decoder) (_ map[uintptr]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(dd.DecodeInt(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int16)
+		if v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d)
+}
+func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) {
+	if v, changed := f.DecMapUintptrInt16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool,
+	d *Decoder) (_ map[uintptr]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[uintptr]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(dd.DecodeInt(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int32)
+		if v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d)
+}
+func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) {
+	if v, changed := f.DecMapUintptrInt32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool,
+	d *Decoder) (_ map[uintptr]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(dd.DecodeInt(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]int64)
+		if v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d)
+}
+func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) {
+	if v, changed := f.DecMapUintptrInt64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool,
+	d *Decoder) (_ map[uintptr]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]float32)
+		if v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d)
+}
+func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) {
+	if v, changed := f.DecMapUintptrFloat32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool,
+	d *Decoder) (_ map[uintptr]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[uintptr]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(dd.DecodeFloat(true))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]float64)
+		if v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d)
+}
+func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) {
+	if v, changed := f.DecMapUintptrFloat64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool,
+	d *Decoder) (_ map[uintptr]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[uintptr]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat(false)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[uintptr]bool)
+		if v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d)
+}
+func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) {
+	if v, changed := f.DecMapUintptrBoolV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool,
+	d *Decoder) (_ map[uintptr]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[uintptr]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk uintptr
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = uintptr(dd.DecodeUint(uintBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]interface{})
+		if v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d)
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) {
+	if v, changed := f.DecMapIntIntfV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool,
+	d *Decoder) (_ map[int]interface{}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int]interface{}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	mapGet := !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk int
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = nil
+			}
+			continue
+		}
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]string)
+		if v, changed := fastpathTV.DecMapIntStringV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d)
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) {
+	if v, changed := f.DecMapIntStringV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool,
+	d *Decoder) (_ map[int]string, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 24)
+		v = make(map[int]string, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = ""
+			}
+			continue
+		}
+		mv = dd.DecodeString()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint)
+		if v, changed := fastpathTV.DecMapIntUintV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d)
+}
+func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) {
+	if v, changed := f.DecMapIntUintV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool,
+	d *Decoder) (_ map[int]uint, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uint, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv uint
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint8)
+		if v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d)
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) {
+	if v, changed := f.DecMapIntUint8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool,
+	d *Decoder) (_ map[int]uint8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]uint8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint8(dd.DecodeUint(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint16)
+		if v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d)
+}
+func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) {
+	if v, changed := f.DecMapIntUint16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool,
+	d *Decoder) (_ map[int]uint16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int]uint16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv uint16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint16(dd.DecodeUint(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint32)
+		if v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d)
+}
+func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) {
+	if v, changed := f.DecMapIntUint32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool,
+	d *Decoder) (_ map[int]uint32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]uint32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv uint32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uint32(dd.DecodeUint(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uint64)
+		if v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d)
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) {
+	if v, changed := f.DecMapIntUint64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool,
+	d *Decoder) (_ map[int]uint64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uint64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeUint(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]uintptr)
+		if v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d)
+}
+func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) {
+	if v, changed := f.DecMapIntUintptrV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool,
+	d *Decoder) (_ map[int]uintptr, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]uintptr, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv uintptr
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = uintptr(dd.DecodeUint(uintBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int)
+		if v, changed := fastpathTV.DecMapIntIntV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d)
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) {
+	if v, changed := f.DecMapIntIntV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool,
+	d *Decoder) (_ map[int]int, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]int, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int(dd.DecodeInt(intBitsize))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int8)
+		if v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d)
+}
+func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) {
+	if v, changed := f.DecMapIntInt8V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool,
+	d *Decoder) (_ map[int]int8, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]int8, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv int8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int8(dd.DecodeInt(8))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int16)
+		if v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d)
+}
+func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) {
+	if v, changed := f.DecMapIntInt16V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool,
+	d *Decoder) (_ map[int]int16, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 10)
+		v = make(map[int]int16, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv int16
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int16(dd.DecodeInt(16))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int32)
+		if v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d)
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) {
+	if v, changed := f.DecMapIntInt32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool,
+	d *Decoder) (_ map[int]int32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]int32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = int32(dd.DecodeInt(32))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]int64)
+		if v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d)
+}
+func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) {
+	if v, changed := f.DecMapIntInt64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool,
+	d *Decoder) (_ map[int]int64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]int64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv int64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeInt(64)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]float32)
+		if v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d)
+}
+func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) {
+	if v, changed := f.DecMapIntFloat32V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool,
+	d *Decoder) (_ map[int]float32, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 12)
+		v = make(map[int]float32, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv float32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = float32(dd.DecodeFloat(true))
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]float64)
+		if v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d)
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) {
+	if v, changed := f.DecMapIntFloat64V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool,
+	d *Decoder) (_ map[int]float64, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 16)
+		v = make(map[int]float64, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = 0
+			}
+			continue
+		}
+		mv = dd.DecodeFloat(false)
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int]bool)
+		if v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d)
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) {
+	if v, changed := f.DecMapIntBoolV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool,
+	d *Decoder) (_ map[int]bool, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, 9)
+		v = make(map[int]bool, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+
+	var mk int
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep {
+			dd.ReadMapElemKey()
+		}
+		mk = int(dd.DecodeInt(intBitsize))
+		if esep {
+			dd.ReadMapElemValue()
+		}
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue {
+				delete(v, mk)
+			} else {
+				v[mk] = false
+			}
+			continue
+		}
+		mv = dd.DecodeBool()
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[int8]interface{})
+		if v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d)
+}
+func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) {
+	if v, changed := f.DecMapInt8IntfV(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool,
bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]string) + if v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d) +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) { + if v, changed := f.DecMapInt8StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint) + if v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d) +} +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) { + if v, changed := f.DecMapInt8UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) 
+ if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint8) + if v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d) +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) { + if v, changed := f.DecMapInt8Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint16) + if v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d) +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) { + if v, changed := f.DecMapInt8Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, + d *Decoder) (_ map[int8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint32) + if v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d) +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) { + if v, changed := f.DecMapInt8Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint64) + if v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d) +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) { + if v, changed := f.DecMapInt8Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uintptr) + if v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d) +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) { + if v, changed := f.DecMapInt8UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int) + if v, changed := 
fastpathTV.DecMapInt8IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d) +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) { + if v, changed := f.DecMapInt8IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int8) + if v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d) +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) { + if v, changed := f.DecMapInt8Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int16) + if v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d) +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) { + if v, changed := f.DecMapInt8Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + 
} + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int32) + if v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d) +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) { + if v, changed := f.DecMapInt8Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int64) + if v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d) +} +func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) { + if v, changed := f.DecMapInt8Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float32) + if v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d) +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) { + if v, changed := f.DecMapInt8Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, + d 
*Decoder) (_ map[int8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float64) + if v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d) +} +func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) { + if v, changed := f.DecMapInt8Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]bool) + if v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d) +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) { + if v, changed := f.DecMapInt8BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]interface{}) + if v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d) +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) { + if v, changed := f.DecMapInt16IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]string) + if v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) { + if v, changed := f.DecMapInt16StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint) + if v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { + if v, changed := f.DecMapInt16UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen 
:= decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint8) + if v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { + if v, changed := f.DecMapInt16Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, + d *Decoder) (_ map[int16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint16) + if v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { + if v, changed := f.DecMapInt16Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, + d *Decoder) (_ map[int16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint32) + 
if v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { + if v, changed := f.DecMapInt16Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint64) + if v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { + if v, changed := f.DecMapInt16Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uintptr) + if v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { + if v, changed := f.DecMapInt16UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv uintptr + hasLen 
:= containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int) + if v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) +} +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) { + if v, changed := f.DecMapInt16IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int8) + if v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { + if v, changed := f.DecMapInt16Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int16) + if v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { + if v, 
changed := f.DecMapInt16Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int32) + if v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { + if v, changed := f.DecMapInt16Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int64) + if v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { + if v, changed := f.DecMapInt16Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + 
v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float32) + if v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { + if v, changed := f.DecMapInt16Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float64) + if v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { + if v, changed := f.DecMapInt16Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]bool) + if v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { + if v, changed := f.DecMapInt16BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + 
containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]interface{}) + if v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { + if v, changed := f.DecMapInt32IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]string) + if v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { + if v, changed := f.DecMapInt32StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + 
dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint) + if v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { + if v, changed := f.DecMapInt32UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint8) + if v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { + if v, changed := f.DecMapInt32Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint16) + if v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { + if v, changed := f.DecMapInt32Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, + d *Decoder) (_ map[int32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = 
make(map[int32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint32) + if v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { + if v, changed := f.DecMapInt32Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint64) + if v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { + if v, changed := f.DecMapInt32Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uintptr) + if v, changed := 
fastpathTV.DecMapInt32UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { + if v, changed := f.DecMapInt32UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int) + if v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { + if v, changed := f.DecMapInt32IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int8) + if v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) +} +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) { + if v, changed := f.DecMapInt32Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, + d *Decoder) (_ map[int32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) 
|| !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int16) + if v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { + if v, changed := f.DecMapInt32Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int32) + if v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { + if v, changed := f.DecMapInt32Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int64) + if v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { + if v, changed := f.DecMapInt32Int64V(*vp, true, d); 
changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float32) + if v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { + if v, changed := f.DecMapInt32Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float64) + if v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { + if v, changed := f.DecMapInt32Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, + d *Decoder) (_ map[int32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } 
else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]bool) + if v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { + if v, changed := f.DecMapInt32BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]interface{}) + if v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { + if v, changed := f.DecMapInt64IntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]string) + if v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { + if v, changed := f.DecMapInt64StringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, + d *Decoder) 
(_ map[int64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint) + if v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { + if v, changed := f.DecMapInt64UintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint8) + if v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { + if v, changed := f.DecMapInt64Uint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, + d *Decoder) (_ map[int64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint16) + if v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { + if v, changed := f.DecMapInt64Uint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint32) + if v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { + if v, changed := f.DecMapInt64Uint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, + d *Decoder) (_ map[int64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint64) + if v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { + if v, changed := f.DecMapInt64Uint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = 
true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uintptr) + if v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { + if v, changed := f.DecMapInt64UintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int) + if v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { + if v, changed := f.DecMapInt64IntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int8) + if v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d); changed { + *vp = v + } + return + } + 
fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { + if v, changed := f.DecMapInt64Int8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int16) + if v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { + if v, changed := f.DecMapInt64Int16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int32) + if v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { + if v, changed := f.DecMapInt64Int32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if 
esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int64) + if v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { + if v, changed := f.DecMapInt64Int64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float32) + if v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { + if v, changed := f.DecMapInt64Float32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float64) + if v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { + if v, changed := f.DecMapInt64Float64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v 
map[int64]float64, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]bool) + if v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { + if v, changed := f.DecMapInt64BoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, + d *Decoder) (_ map[int64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk int64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]interface{}) + if v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { + if v, changed := f.DecMapBoolIntfV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + 
mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]string) + if v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { + if v, changed := f.DecMapBoolStringV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint) + if v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { + if v, changed := f.DecMapBoolUintV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint8) + if v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { + if v, changed := f.DecMapBoolUint8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) 
+ v = make(map[bool]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint16) + if v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { + if v, changed := f.DecMapBoolUint16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint32) + if v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { + if v, changed := f.DecMapBoolUint32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint64) + if v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d); changed { + *vp = v + } + return 
+ } + fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { + if v, changed := f.DecMapBoolUint64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uintptr) + if v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { + if v, changed := f.DecMapBoolUintptrV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int) + if v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { + if v, changed := f.DecMapBoolIntV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + 
dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int8) + if v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { + if v, changed := f.DecMapBoolInt8V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int16) + if v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { + if v, changed := f.DecMapBoolInt16V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int32) + if v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { + if v, changed := f.DecMapBoolInt32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int64) + if v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { + if v, changed := f.DecMapBoolInt64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, + d *Decoder) (_ map[bool]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float32) + if v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { + if v, changed := f.DecMapBoolFloat32V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == 
reflect.Ptr { + vp := rv2i(rv).(*map[bool]float64) + if v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { + if v, changed := f.DecMapBoolFloat64V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]bool) + if v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d); changed { + *vp = v + } + return + } + fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { + if v, changed := f.DecMapBoolBoolV(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + + var mk bool + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl new file mode 100644 index 00000000000..1df9504c55c --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -0,0 +1,465 @@ +// +build !notfastpath + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl +// ************************************************************ + +package codec + +// Fast path functions try to create a fast path encode or decode implementation +// for common maps and slices. +// +// We define the functions and register them in this single file +// so as not to pollute the encode.go and decode.go, and create a dependency in there.
+// This file can be omitted without causing a build failure. +// +// The advantage of fast paths is: +// - Many calls bypass reflection altogether +// +// Currently supported: +// - slice of all builtin types, +// - map of all builtin types to string or interface value +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// This should provide adequate "typical" implementations. +// +// Note that fast track decode functions must handle values for which an address cannot be obtained. +// For example: +// m2 := map[string]int{} +// p2 := []interface{}{m2} +// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable. +// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +type fastpathT struct {} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [{{ .FastpathLen }}]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, {{ .FastpathLen }} // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < {{ .FastpathLen }} && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + if useLookupRecognizedTypes { + recognizedRtids = append(recognizedRtids, xptr) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) + } + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + {{range .Values}}{{if not .Primitive}}{{if .MapKey }} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}:{{else}} + case map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e){{if not .MapKey }} + case *[]{{ .Elem }}:{{else}} + case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +{{/* **** removing this block, as they are never called directly **** +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}: + fastpathTV.{{ 
.MethodNamePfx "Enc" false }}V(v, e) + case *[]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + case map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) + case *map[{{ .MapKey }}]{{ .Elem }}: + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) +{{end}}{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} +*/}} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) + } else { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e) + } +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + for _, v2 := range v { + if esep { ee.WriteArrayElem() } + {{ encmd .Elem "v2"}} + } + ee.WriteArrayEnd() +} + +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + for j, v2 := range v { + if esep { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + } + {{ encmd .Elem "v2"}} + } + ee.WriteMapEnd() +} + +{{end}}{{end}}{{end}} + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} + +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) +} +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + {{end}}if e.h.Canonical { + {{if eq .MapKey "interface{}"}}{{/* out of band + */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI {{/* put loop variables outside. 
seems currently needed for better perf */}} + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + for j := range v2 { + if esep { ee.WriteMapElemKey() } + e.asis(v2[j].v) + if esep { ee.WriteMapElemValue() } + e.encode(v[v2[j].i]) + } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) + var i int + for k, _ := range v { + v2[i] = {{ $x }}(k) + i++ + } + sort.Sort({{ sorttype .MapKey false}}(v2)) + for _, k2 := range v2 { + if esep { ee.WriteMapElemKey() } + {{if eq .MapKey "string"}}if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} + if esep { ee.WriteMapElemValue() } + {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} + } {{end}} + } else { + for k2, v2 := range v { + if esep { ee.WriteMapElemKey() } + {{if eq .MapKey "string"}}if asSymbols { + ee.EncodeSymbol(k2) + } else { + ee.EncodeString(c_UTF8, k2) + }{{else}}{{ encmd .MapKey "k2"}}{{end}} + if esep { ee.WriteMapElemValue() } + {{ encmd .Elem "v2"}} + } + } + ee.WriteMapEnd() +} + +{{end}}{{end}}{{end}} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case []{{ .Elem }}:{{else}} + case map[{{ .MapKey }}]{{ .Elem }}:{{end}} + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d){{if not .MapKey }} + case *[]{{ .Elem }}: {{else}} + case *map[{{ .MapKey }}]{{ .Elem }}: {{end}} + if v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d); changed2 { + *v = v2 + } +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case *[]{{ .Elem }}: {{else}} + case *map[{{ .MapKey }}]{{ .Elem }}: {{end}} + *v = nil +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + +// -- -- fast path functions +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} +{{/* +Slices can change if they +- did not come from an array +- are addressable (from a ptr) +- are settable (e.g. 
contained in an interface{}) +*/}} +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]{{ .Elem }}) + if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d); changed { + *vp = v + } + } else { + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).([]{{ .Elem }}), !array, d) + } +} +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { + if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { + *vp = v + } +} +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { + dd := d.d + {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []{{ .Elem }}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]{{ .Elem }}, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + } else { + xlen = 8 + } + v = make([]{{ .Elem }}, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, {{ zerocmd .Elem }}) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]{{ .Elem }}, 0) + changed = true + } + } + slh.End() + return v, changed +} + +{{end}}{{end}}{{end}} + + +{{range .Values}}{{if not .Primitive}}{{if .MapKey }} +{{/* +Maps can change if they are +- addressable (from a ptr) +- settable (e.g. 
contained in an interface{})
+*/}}
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+	if rv.Kind() == reflect.Ptr {
+		vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
+		if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
+			*vp = v
+		}
+		return
+	}
+	fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d)
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
+	if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
+		*vp = v
+	}
+}
+func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool,
+	d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
+	dd, esep := d.d, d.hh.hasElemSeparators()
+	{{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
+	containerLen := dd.ReadMapStart()
+	if canChange && v == nil {
+		xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
+		v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen)
+		changed = true
+	}
+	if containerLen == 0 {
+		dd.ReadMapEnd()
+		return v, changed
+	}
+	{{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
+	var mk {{ .MapKey }}
+	var mv {{ .Elem }}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if esep { dd.ReadMapElemKey() }
+		{{ if eq .MapKey "interface{}" }}mk = nil
+		d.decode(&mk)
+		if bv, bok := mk.([]byte); bok {
+			mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+		}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
+		if esep { dd.ReadMapElemValue() }
+		if dd.TryDecodeAsNil() {
+			if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} }
+			continue
+		}
+		{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
+		d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
+		if v != nil {
+			v[mk] = mv
+		}
+	}
+	dd.ReadMapEnd()
+	return v, changed
+}
+
+{{end}}{{end}}{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 00000000000..9573d64ab01
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,35 @@
+// +build notfastpath
+
+package codec
+
+import "reflect"
+
+const fastpathEnabled = false
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, execution of small tools which use codec, etc.
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster builds, test execution,
+// short-program runs, etc.
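+//
+// For example (illustrative commands only; they assume a standard Go
+// toolchain and are not defined by this package):
+//
+//	go build -tags notfastpath ./...
+//	go test -tags notfastpath github.com/ugorji/go/codec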
+ +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +var fastpathAV fastpathA +var fastpathTV fastpathT diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl new file mode 100644 index 00000000000..d9940c0ad6f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl @@ -0,0 +1,77 @@ +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int; _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = 8 + } + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + // {{var "dn"}} = r.TryDecodeAsNil() + {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + {{else}} + // if indefinite, etc, then expand the slice if necessary + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ 
.Varname }} = {{var "v"}} +}{{end}} + diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl new file mode 100644 index 00000000000..8323b54940d --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl @@ -0,0 +1,42 @@ +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go new file mode 100644 index 00000000000..c89a14d676b --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -0,0 +1,245 @@ +/* // +build ignore */ + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl +// ************************************************************ + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = 8 + +// This file is used to generate helper code for codecgen. +// The values here i.e. genHelper(En|De)coder are not to be used directly by +// library users. They WILL change continuously and without notice. 
+
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+	return genHelperEncoder{e: e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+	return genHelperDecoder{d: d}, d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+	e *Encoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+	d *Decoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+	return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+	return f.e.cf.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+	// println(">>>>>>>>> EncFallback")
+	// f.e.encodeI(iv, false, false)
+	f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+	bs, fnerr := iv.MarshalText()
+	f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+	bs, fnerr := iv.MarshalJSON()
+	f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+	bs, fnerr := iv.MarshalBinary()
+	f.e.marshal(bs, fnerr, false, c_RAW)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) {
+	f.e.rawBytes(iv)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
+	if _, ok := f.e.hh.(*BincHandle); ok {
+		return timeTypId
+	}
+	return 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+	return f.e.cf.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) HasExtensions() bool {
+	return len(f.e.h.extHandle) != 0
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
+	rt := reflect.TypeOf(v)
+	if rt.Kind() == reflect.Ptr {
+		rt = rt.Elem()
+	}
+	rtid := rt2id(rt)
+	if xfFn := f.e.h.getExt(rtid); xfFn != nil {
+		f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
+		return true
+	}
+	return false
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY.
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.rawBytes() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := rt2id(rt) + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+	return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) StringView(v []byte) string {
+	return stringView(v)
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
new file mode 100644
index 00000000000..79b6145c99b
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -0,0 +1,220 @@
+/* // +build ignore */
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
+// ************************************************************
+
+package codec
+
+import (
+	"encoding"
+	"reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = {{ .Version }}
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
+	return genHelperEncoder{e:e}, e.e
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
+	return genHelperDecoder{d:d}, d.d
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+	e *Encoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+	d *Decoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+	return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+	return f.e.cf.be // f.e.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+	// println(">>>>>>>>> EncFallback")
+	// f.e.encodeI(iv, false, false)
+	f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+	bs, fnerr := iv.MarshalText()
+	f.e.marshal(bs, fnerr, false, c_UTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+	bs, fnerr := iv.MarshalJSON()
+	f.e.marshal(bs, fnerr, true, c_UTF8)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE.
*DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, c_RAW) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.rawBytes(iv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) TimeRtidIfBinc() uintptr { + if _, ok := f.e.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) IsJSONHandle() bool { + return f.e.cf.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + rtid := rt2id(rt) + if xfFn := f.e.h.getExt(rtid); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { + f.d.swallow() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false, false) + // f.d.decodeValueFallback(rv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.rawBytes() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) TimeRtidIfBinc() uintptr { + if _, ok := f.d.hh.(*BincHandle); ok { + return timeTypId + } + return 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + rt := reflect.TypeOf(v).Elem() + rtid := rt2id(rt) + if xfFn := f.d.h.getExt(rtid); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) StringView(v []byte) string { + return stringView(v) +} + diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go new file mode 100644 index 00000000000..b50a6024dde --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -0,0 +1,132 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{end}} {{if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int; _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = 8 + } + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + // {{var "dn"}} = r.TryDecodeAsNil() + {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + {{else}} + // if indefinite, etc, then expand the slice if necessary + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} 
= make([]{{ .Typ }}, 0)
+		{{var "c"}} = true
+	} {{end}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+	*{{ .Varname }} = {{var "v"}}
+}{{end}}
+
+`
+
diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 00000000000..7d430e5d41b
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,2030 @@
+// +build codecgen.exec

+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"bytes"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"go/format"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+//   - RawExt
+//   - Raw
+//   - Builtins
+//   - Extensions
+//   - (Binary|Text|JSON)(Unm|M)arshal
+//   - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+//   - Extensions, only if Extensions are configured.
+//
+// However, codecgen doesn't support the following:
+//   - Canonical option. (codecgen IGNORES it currently)
+//     This is just because it has not been implemented.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+//   array: [n]T
+//   slice: []T
+//   map: map[K]V
+//   primitive: [u]int[n], float(32|64), bool, string
+//   struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement completely and not fall back to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// codec framework is very feature rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fall back to runtime codec for encoding/decoding interfaces.
+// In addition, we fall back for any value which cannot be guaranteed at runtime.
+// This allows us to support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your code refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code.
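+//
+// As an illustration (a sketch of the emitted shape, not verbatim output),
+// the Selfer methods generated for a type T, with this codec package imported
+// under the name codec1978, look like:
+//
+//	func (x *T) CodecEncodeSelf(e *codec1978.Encoder) { ... }
+//	func (x *T) CodecDecodeSelf(d *codec1978.Decoder) { ... }
+//
+// For structs, codecDecodeSelfFromMap and codecDecodeSelfFromArray helpers
+// are generated alongside these.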
+
+// Note:
+// codecgen-generated code depends on the variables defined by fast-path.generated.go.
+// Consequently, you cannot run with tags "codecgen notfastpath".
+
+// GenVersion is the current version of codecgen.
+//
+// NOTE: Increment this value each time codecgen changes fundamentally.
+// Fundamental changes are:
+//   - helper methods change (signature change, new ones added, some removed, etc)
+//   - codecgen command line changes
+//
+// v1: Initial Version
+// v2:
+// v3: Changes for Kubernetes:
+//     changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+// v6: removed unsafe from gen, and now uses codecgen.exec tag
+const genVersion = 8
+
+const (
+	genCodecPkg        = "codec1978"
+	genTempVarPfx      = "yy"
+	genTopLevelVarName = "x"
+
+	// ignore canBeNil parameter, and always set to true.
+	// This is because nil can appear anywhere, so we should always check.
+	genAnythingCanBeNil = true
+
+	// if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+	// else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+	// are not executed a lot.
+	//
+	// From testing, it didn't make much difference in runtime, so keep as true (one function only)
+	genUseOneFunctionForDecStructMap = true
+)
+
+type genStructMapStyle uint8
+
+const (
+	genStructMapStyleConsolidated genStructMapStyle = iota
+	genStructMapStyleLenPrefix
+	genStructMapStyleCheckBreak
+)
+
+var (
+	genAllTypesSamePkgErr  = errors.New("All types must be in the same package")
+	genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
+	genBase64enc           = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+	genQNameRegex          = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+// genRunner holds some state used during a Gen run.
+type genRunner struct {
+	w io.Writer      // output
+	c uint64         // counter used for generating varsfx
+	t []reflect.Type // list of types to run selfer on
+
+	tc reflect.Type     // currently running selfer on this type
+	te map[uintptr]bool // types for which the encoder has been created
+	td map[uintptr]bool // types for which the decoder has been created
+	cp string           // codec import path
+
+	im  map[string]reflect.Type // imports to add
+	imn map[string]string       // package names of imports to add
+	imc uint64                  // counter for import numbers
+
+	is map[reflect.Type]struct{} // types seen during import search
+	bp string                    // base PkgPath, for which we are generating
+
+	cpfx string // codec package prefix
+
+	tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+	ts []reflect.Type            // types for which enc/dec must be generated
+
+	xs string // top level variable/constant suffix
+	hn string // fn helper type name
+
+	ti *TypeInfos
+	// rr *rand.Rand // random generator for file-specific types
+
+	nx bool // no extensions
+}
+
+// Gen will write a complete go file containing Selfer implementations for each
+// type passed. All the types must be in the same package.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
+	ti *TypeInfos, typ ...reflect.Type) {
+	// All types passed to this method do not have a codec.Selfer method implemented directly.
+ // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(genAllTypesSamePkgErr) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." + x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k, _ := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + x.linef("%s \"%s\"", x.imn[k], k) + } + // add required packages + for _, k := range [...]string{"reflect", "runtime", "fmt", "errors"} { + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8)) + x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW)) + x.linef("// ----- value types used ----") + x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray)) + x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap)) + x.linef("// ----- containerStateValues ----") + x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey)) + x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue)) + x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd)) + x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem)) + x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd)) + x.line(")") + x.line("var (") + x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())") + x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", `) + x.linef(`%v, %sGenVersion, file)`, genVersion, x.cpfx) + x.line("panic(err)") + x.linef("}") + x.line("if false { // reference the types, but skip this branch at build/run time") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(genExpectArrayOrMapErr) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. 
+ return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + // fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + if _, err := io.WriteString(x.w, s); err != nil { + panic(err) + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.line(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) outf(s string, params ...interface{}) { + x.out(fmt.Sprintf(s, params...)) +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. + // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." 
+ t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Struct + x.varsfxreset() + fnSigPfx := "func (x " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + + x.out(fnSigPfx) + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + // x.enc(genTopLevelVarName, t) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. + // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. 
a slice of array: add the array
+	x.registerXtraT(t.Elem())
+	if tkey != nil {
+		x.registerXtraT(tkey)
+	}
+}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+	// fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
+	var checkNil bool
+	switch t.Kind() {
+	case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+		checkNil = true
+	}
+	if checkNil {
+		x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
+	}
+	switch t.Kind() {
+	case reflect.Ptr:
+		switch t.Elem().Kind() {
+		case reflect.Struct, reflect.Array:
+			x.enc(varname, genNonPtr(t))
+		default:
+			i := x.varsfx()
+			x.line(genTempVarPfx + i + " := *" + varname)
+			x.enc(genTempVarPfx+i, genNonPtr(t))
+		}
+	case reflect.Struct, reflect.Array:
+		i := x.varsfx()
+		x.line(genTempVarPfx + i + " := &" + varname)
+		x.enc(genTempVarPfx+i, t)
+	default:
+		x.enc(varname, t)
+	}
+
+	if checkNil {
+		x.line("}")
+	}
+
+}
+
+// enc will encode a variable (varname) of type t,
+// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying)
+func (x *genRunner) enc(varname string, t reflect.Type) {
+	rtid := rt2id(t)
+	// We call CodecEncodeSelf if one of the following is honored:
+	//   - the type already implements Selfer, call that
+	//   - the type has a Selfer implementation just created, use that
+	//   - the type is in the list of the ones we will generate for, but it is not currently being generated
+
+	mi := x.varsfx()
+	tptr := reflect.PtrTo(t)
+	tk := t.Kind()
+	if x.checkForSelfer(t, varname) {
+		if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+			if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			}
+		} else { // varname is of type T
+			if t.Implements(selferTyp) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			} else if tptr.Implements(selferTyp) {
+				x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
+				x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
+				return
+			}
+		}
+
+		if _, ok := x.te[rtid]; ok {
+			x.line(varname + ".CodecEncodeSelf(e)")
+			return
+		}
+	}
+
+	inlist := false
+	for _, t0 := range x.t {
+		if t == t0 {
+			inlist = true
+			if x.checkForSelfer(t, varname) {
+				x.line(varname + ".CodecEncodeSelf(e)")
+				return
+			}
+			break
+		}
+	}
+
+	var rtidAdded bool
+	if t == x.tc {
+		x.te[rtid] = true
+		rtidAdded = true
+	}
+
+	// check if
+	//   - type is RawExt, Raw
+	//   - the type implements (Text|JSON|Binary)(Unm|M)arshal
+	x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
+	x.linef("_ = %sm%s", genTempVarPfx, mi)
+	x.line("if false {")           //start if block
+	defer func() { x.line("}") }() //end if block
+
+	if t == rawTyp {
+		x.linef("} else { z.EncRaw(%v)", varname)
+		return
+	}
+	if t == rawExtTyp {
+		x.linef("} else { r.EncodeRawExt(%v, e)", varname)
+		return
+	}
+	// HACK: Support for Builtins.
+	// Currently, only Binc supports builtins, and the only builtin type is time.Time.
+	// Have a method that returns the rtid for time.Time if Handle is Binc.
+	if t == timeTyp {
+		vrtid := genTempVarPfx + "m" + x.varsfx()
+		x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
+		x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
+	}
+	// only check for extensions if the type is named, and has a packagePath.
+	if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+		x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
+	}
+	if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
+		if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
+			x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+		}
+		if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
+			x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+		} else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
+			x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+		}
+	} else { // varname is of type T
+		if t.Implements(binaryMarshalerTyp) {
+			x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
+		} else if tptr.Implements(binaryMarshalerTyp) {
+			x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname)
+		}
+		if t.Implements(jsonMarshalerTyp) {
+			x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
+		} else if tptr.Implements(jsonMarshalerTyp) {
+			x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname)
+		} else if t.Implements(textMarshalerTyp) {
+			x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
+		} else if tptr.Implements(textMarshalerTyp) {
+			x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname)
+		}
+	}
+	x.line("} else {")
+
+	switch t.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x.line("r.EncodeInt(int64(" + varname + "))")
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		x.line("r.EncodeUint(uint64(" + varname + "))")
+	case reflect.Float32:
+		x.line("r.EncodeFloat32(float32(" + varname + "))")
+	case reflect.Float64:
+		x.line("r.EncodeFloat64(float64(" + varname + "))")
+	case reflect.Bool:
+		x.line("r.EncodeBool(bool(" + varname + "))")
+	case reflect.String:
+		x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))")
+	case reflect.Chan:
+		x.xtraSM(varname, true, t)
+		// x.encListFallback(varname, rtid, t)
+	case reflect.Array:
+		x.xtraSM(varname, true, t)
+	case reflect.Slice:
+		// if nil, call dedicated function
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		if rtid == uint8SliceTypId {
+			x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
+		} else if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
+		} else {
+			x.xtraSM(varname, true, t)
+			// x.encListFallback(varname, rtid, t)
+		}
+	case reflect.Map:
+		// if nil, call dedicated function
+		// if a known fastpath map, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
+		// x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
+		if fastpathAV.index(rtid) != -1 {
+			g := x.newGenV(t)
+			x.line("z.F."
+ g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, true, t) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. + if ti.anyOmitEmpty { + x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) + x.linef("_ = %s", numfieldsvar) + } + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) + x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) + var nn int + if ti.anyOmitEmpty { + for j, si := range tisfi { + if !si.omitEmpty { + nn++ + continue + } + var t2 reflect.StructField + var omitline string + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + omitline += varname3 + " != nil && " + } + } + } + // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + switch t2.Type.Kind() { + case reflect.Struct: + omitline += " true" + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + omitline += "len(" + varname + "." + t2.Name + ") != 0" + default: + omitline += varname + "." 
+ t2.Name + " != " + x.genZeroValueR(t2.Type) + } + x.linef("%s[%v] = %s", numfieldsvar, j, omitline) + } + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + // x.linef("if %s { z.EncSendContainerState(codecSelfer_containerArrayElem%s); r.EncodeNil() } else { ", isNilVarName, x.xs) + } + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") + x.line("r.WriteMapElemValue()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else {") + x.line("r.WriteMapEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, 
([%v]byte(%s))[:])", x.xs, t.Len(), varname) + return + } + i := x.varsfx() + g := genTempVarPfx + x.line("r.WriteArrayStart(len(" + varname + "))") + if t.Kind() == reflect.Chan { + x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef("%sv%s := <-%s", g, i, varname) + } else { + // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + } + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + // TODO: expand this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") + x.line("r.WriteMapElemKey()") // f("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") // f("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") // f("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decVar(varname, decodedNilVarname string, t reflect.Type, canBeNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + i := x.varsfx() + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + if canBeNil { + x.line("if r.TryDecodeAsNil() {") + if decodedNilVarname != "" { + x.line(decodedNilVarname + " = true") + } else if t.Kind() == reflect.Ptr { + x.line("if " + varname + " != nil { ") + + // if varname is a field of a struct (has a dot in it), + // then just set it to nil + if strings.IndexByte(varname, '.') != -1 { + x.line(varname + " = nil") + } else { + x.line("*" + varname + " = " + x.genZeroValueR(t.Elem())) + } + x.line("}") + } else { + x.line(varname + " = " + x.genZeroValueR(t)) + } + x.line("} else {") + } else { + x.line("// cannot be nil") + } + if t.Kind() != reflect.Ptr { + if x.decTryAssignPrimitive(varname, t) { + x.line(genTempVarPfx + "v" + i + " := &" + varname) + x.dec(genTempVarPfx+"v"+i, t) + } + } else { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. 
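+			// Illustrative only (hypothetical example, not verbatim generator output):
+			// for a field v.F of type **int, the block below emits code roughly like:
+			//
+			//	if v.F == nil { v.F = new(*int) }
+			//	if *v.F == nil { *v.F = new(int) }
+			//	tmp := *v.F // tmp has type *int, a pointer to the base type
+			//	... decode into tmp ...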
+ var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + // if varname has [ in it, then create temp variable for this ptr thingie + if strings.Index(varname, "[") >= 0 { + varname2 := genTempVarPfx + "w" + i + x.line(varname2 + " := " + varname) + varname = varname2 + } + + if ptrPfx == "" { + x.dec(varname, t) + } else { + x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname) + x.dec(genTempVarPfx+"z"+i, t) + } + + } + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type ptrTo(t). +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + mi := x.varsfx() + x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == rawTyp { + x.linef("} else { *%v = z.DecRaw()", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) + return + } + + // HACK: Support for Builtins. + // Currently, only Binc supports builtins, and the only builtin type is time.Time. + // Have a method that returns the rtid for time.Time if Handle is Binc. + if t == timeTyp { + vrtid := genTempVarPfx + "m" + x.varsfx() + x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid) + x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) + } + // only check for extensions if the type is named, and has a packagePath. 
+	if !x.nx && genImportPath(t) != "" && t.Name() != "" {
+		// first check if extensions are configured, before doing the interface conversion
+		x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
+	}
+
+	if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
+		x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname)
+	}
+	if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
+		x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname)
+	} else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
+		x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname)
+	}
+
+	x.line("} else {")
+
+	// Since these are pointers, we cannot share, and have to use them one by one
+	switch t.Kind() {
+	case reflect.Int:
+		x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))")
+		// x.line("z.DecInt((*int)(" + varname + "))")
+	case reflect.Int8:
+		x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))")
+		// x.line("z.DecInt8((*int8)(" + varname + "))")
+	case reflect.Int16:
+		x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))")
+		// x.line("z.DecInt16((*int16)(" + varname + "))")
+	case reflect.Int32:
+		x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))")
+		// x.line("z.DecInt32((*int32)(" + varname + "))")
+	case reflect.Int64:
+		x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))")
+		// x.line("z.DecInt64((*int64)(" + varname + "))")
+
+	case reflect.Uint:
+		x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+		// x.line("z.DecUint((*uint)(" + varname + "))")
+	case reflect.Uint8:
+		x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))")
+		// x.line("z.DecUint8((*uint8)(" + varname + "))")
+	case reflect.Uint16:
+		x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))")
+		//x.line("z.DecUint16((*uint16)(" + varname + "))")
+	case reflect.Uint32:
+		x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))")
+		//x.line("z.DecUint32((*uint32)(" + varname + "))")
+	case reflect.Uint64:
+		x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))")
+		//x.line("z.DecUint64((*uint64)(" + varname + "))")
+	case reflect.Uintptr:
+		x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
+
+	case reflect.Float32:
+		x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))")
+		//x.line("z.DecFloat32((*float32)(" + varname + "))")
+	case reflect.Float64:
+		x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))")
+		// x.line("z.DecFloat64((*float64)(" + varname + "))")
+
+	case reflect.Bool:
+		x.line("*((*bool)(" + varname + ")) = r.DecodeBool()")
+		// x.line("z.DecBool((*bool)(" + varname + "))")
+	case reflect.String:
+		x.line("*((*string)(" + varname + ")) = r.DecodeString()")
+		// x.line("z.DecString((*string)(" + varname + "))")
+	case reflect.Array, reflect.Chan:
+		x.xtraSM(varname, false, t)
+		// x.decListFallback(varname, rtid, true, t)
+	case reflect.Slice:
+		// if a []uint8, call dedicated function
+		// if a known fastpath slice, call dedicated function
+		// else write encode function in-line.
+		// - if elements are primitives or Selfers, call dedicated function on each member.
+		// - else call Encoder.encode(XXX) on it.
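+		// For example (illustrative, not verbatim output): for a []string field,
+		// the fastpath branch emits a line of the form z.F.DecSliceStringX(vp, d),
+		// where vp is a *[]string pointing at the field.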
+ if rtid == uint8SliceTypId { + x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false)") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") + } else { + x.xtraSM(varname, false, t) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") + } else { + x.xtraSM(varname, false, t) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + x.decStruct(varname, rtid, t) + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) + + if t.Name() != "" { + tryAsPtr = true + return + } + + switch t.Kind() { + case reflect.Int: + x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) + case reflect.Int8: + x.linef("%s = r.DecodeInt(8)", varname) + case reflect.Int16: + x.linef("%s = r.DecodeInt(16)", varname) + case reflect.Int32: + x.linef("%s = r.DecodeInt(32)", varname) + case reflect.Int64: + x.linef("%s = r.DecodeInt(64)", varname) + + case reflect.Uint: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + case reflect.Uint8: + x.linef("%s = r.DecodeUint(8)", varname) + case reflect.Uint16: + x.linef("%s = r.DecodeUint(16)", varname) + case reflect.Uint32: + x.linef("%s = r.DecodeUint(32)", varname) + case reflect.Uint64: + x.linef("%s = r.DecodeUint(64)", varname) + case reflect.Uintptr: + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) + + case reflect.Float32: + x.linef("%s = r.DecodeFloat(true)", varname) + case reflect.Float64: + x.linef("%s = r.DecodeFloat(false)", varname) + + case reflect.Bool: + x.linef("%s = r.DecodeBool()", varname) + case reflect.String: + x.linef("%s = r.DecodeString()", varname) + default: + tryAsPtr = true + } + return +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false) + return "" + } + // funcs["decLine"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, "", reflect.PtrTo(telem), false) + // return "" + // } + funcs["var"] = func(s string) string { + return ts.TempVar + s + 
ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) + funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false) + return "" + } + // funcs["decLineK"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) + // return "" + // } + // funcs["decLine"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + // return "" + // } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + var t2 reflect.StructField + { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + x.decVar(varname+"."+t2.Name, "", t2.Type, false) + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + i := x.varsfx() + kName := tpfx + "s" + i + + // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") + // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") + // use the scratch buffer to avoid allocation (most field names are < 32). + + x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into") + + x.line("_ = " + kName + "Slc") + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") // f("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line(kName + "Slc = r.DecodeStringAsBytes()") + // let string be scoped to this loop alone, so it doesn't escape. + x.line(kName + " := string(" + kName + "Slc)") + x.line("r.ReadMapElemValue()") // f("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfip // always use sequence from file. decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + for _, si := range tisfi { + var t2 reflect.StructField + { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + } + } + + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + // x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", tpfx, i, x.xs, breakString) + x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.decVar(varname+"."+t2.Name, "", t2.Type, true) + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // if container is map + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("} else { ") + x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) 
+			name = append(name, genTitleCaseName(x.MapKey)...)
+		}
+		name = append(name, genTitleCaseName(x.Elem)...)
+	}
+	return string(name)
+
+}
+
+// genImportPath returns import path of a non-predeclared named type, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+	s = t.PkgPath()
+	if genCheckVendor {
+		// HACK: always handle vendoring. It should be typically on in go 1.6, 1.7
+		s = stripVendor(s)
+	}
+	return
+}
+
+// A go identifier is (letter|_)[letter|number|_]*
+func genGoIdentifier(s string, checkFirstChar bool) string {
+	b := make([]byte, 0, len(s))
+	t := make([]byte, 4)
+	var n int
+	for i, r := range s {
+		if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
+			b = append(b, '_')
+		}
+		// r must be unicode_letter, unicode_digit or _
+		if unicode.IsLetter(r) || unicode.IsDigit(r) {
+			n = utf8.EncodeRune(t, r)
+			b = append(b, t[:n]...)
+		} else {
+			b = append(b, '_')
+		}
+	}
+	return string(b)
+}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+func genTitleCaseName(s string) string {
+	switch s {
+	case "interface{}", "interface {}":
+		return "Intf"
+	default:
+		return strings.ToUpper(s[0:1]) + s[1:]
+	}
+}
+
+func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
+	var ptrPfx string
+	for t.Kind() == reflect.Ptr {
+		ptrPfx += "Ptrto"
+		t = t.Elem()
+	}
+	tstr := t.String()
+	if tn := t.Name(); tn != "" {
+		if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+			return ptrPfx + tn
+		} else {
+			if genQNameRegex.MatchString(tstr) {
+				return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+			} else {
+				return ptrPfx + genCustomTypeName(tstr)
+			}
+		}
+	}
+	switch t.Kind() {
+	case reflect.Map:
+		return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Slice:
+		return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
+	case reflect.Array:
+		return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
+	case reflect.Chan:
+		var cx string
+		switch t.ChanDir() {
+		case reflect.SendDir:
+			cx = "ChanSend"
+		case reflect.RecvDir:
+			cx = "ChanRecv"
+		default:
+			cx = "Chan"
+		}
+		return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
+	default:
+		if t == intfTyp {
+			return ptrPfx + "Interface"
+		} else {
+			if tRef != nil && genImportPath(t) == genImportPath(tRef) {
+				if t.Name() != "" {
+					return ptrPfx + t.Name()
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			} else {
+				// best way to get the package name inclusive
+				// return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				// return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
+				if t.Name() != "" && genQNameRegex.MatchString(tstr) {
+					return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
+				} else {
+					return ptrPfx + genCustomTypeName(tstr)
+				}
+			}
+		}
+	}
+}
+
+// genCustomTypeName base64encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return `"string-is-an-interface"` // return string, to remove ambiguity + case "bool": + return "true" + case "string": + return `"some-string"` + case "float32", "float64", "float", "double": + return "10.1" + default: + return "10" + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "ee.EncodeString(c_UTF8, " + vname + ")" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + case "symbol": + return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(dd.DecodeUint(uintBitsize))" + case "uint8": + return "uint8(dd.DecodeUint(8))" + case "uint16": + return "uint16(dd.DecodeUint(16))" + case "uint32": + return "uint32(dd.DecodeUint(32))" + case "uint64": + return "dd.DecodeUint(64)" + case "uintptr": + return "uintptr(dd.DecodeUint(uintBitsize))" + case "int": + return "int(dd.DecodeInt(intBitsize))" + case "int8": + return "int8(dd.DecodeInt(8))" + case "int16": + return "int16(dd.DecodeInt(16))" + case "int32": + return "int32(dd.DecodeInt(32))" + case "int64": + return "dd.DecodeInt(64)" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(dd.DecodeFloat(true))" + case "float64": + return "dd.DecodeFloat(false)" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +func stripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. 
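+	// e.g. (illustrative) "github.com/foo/bar/vendor/github.com/ugorji/go/codec"
+	// becomes "github.com/ugorji/go/codec".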
+ const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + } + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. +// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. 
+ return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go new file mode 100644 index 00000000000..7567e2c07b4 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + return reflect.ArrayOf(count, elem) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go new file mode 100644 index 00000000000..ec94bd0c0af --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + panic("codec: reflect.ArrayOf unsupported in this go version") +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go new file mode 100644 index 00000000000..51fe40e5bf2 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go new file mode 100644 index 00000000000..d4b9c2c8d9a --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMap(t) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go new file mode 100644 index 00000000000..dcd8c3d11ce --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.4 + +package codec + +// This codec package will only work for go1.4 and above. 
+// This is for the following reasons: +// - go 1.4 was released in 2014 +// - go runtime is written fully in go +// - interface only holds pointers +// - reflect.Value is stabilized as 3 words + +func init() { + panic("codec: go 1.3 and below are not supported") +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go new file mode 100644 index 00000000000..68626e1ce74 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go new file mode 100644 index 00000000000..344f5967bee --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.6,!go1.7 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go new file mode 100644 index 00000000000..de91d29407f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +const genCheckVendor = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go new file mode 100644 index 00000000000..9d007bfed4c --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +var genCheckVendor = false diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go new file mode 100644 index 00000000000..df2febbf801 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -0,0 +1,1963 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. + +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. 
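+//
+// A rough sketch of that fastpath idea (names and signatures simplified for
+// illustration; not a verbatim excerpt):
+//
+//	switch v := iv.(type) {
+//	case int:
+//		ee.EncodeInt(int64(v)) // primitive: encode directly
+//	case map[string]bool:
+//		fastpathEncMapStringBoolV(v, e) // known container: dedicated fast-path function
+//	default:
+//		e.encodeValue(reflect.ValueOf(iv)) // anything else: reflection
+//	}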
+// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. +// +// ------------------------------------ +// On encode, user can specify omitEmpty. This means that the value will be omitted +// if the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. 
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// The disadvantage is that small functions which use panics cannot be inlined.
+// The code accounts for that by only using panics behind an interface;
+// since interface calls cannot be inlined, this is irrelevant.
+//
+// We considered storing the error in the En|Decoder.
+// - once it has its err field set, it cannot be used again.
+// - panicking will be optional, controlled by const flag.
+// - code should always check error first and return early.
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	scratchByteArrayLen = 32
+	// initCollectionCap = 16 // 32 is defensive. 16 is preferred.
+
+	// Support encoding.(Binary|Text)(Unm|M)arshaler.
+	// This constant flag will enable or disable it.
+	supportMarshalInterfaces = true
+
+	// for debugging, set this to false, to catch panic traces.
+	// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
+	recoverPanicToErr = true
+
+	// arrayCacheLen is the length of the cache used in encoder or decoder for
+	// allowing zero-alloc initialization.
+	arrayCacheLen = 8
+
+	// We tried an optimization, where we detect if a type is one of the known types
+	// we optimized for (e.g. int, []uint64, etc).
+	//
+	// However, we notice some worse performance when using this optimization.
+	// So we hide it behind a flag, to turn on if needed.
+	useLookupRecognizedTypes = false
+
+	// using recognized allows us to do d.decode(interface{}) instead of d.decodeValue(reflect.Value)
+	// when we can infer that the kind of the interface{} is one of the ones hard-coded in the
+	// type switch for known types or the ones defined by fast-path.
+	//
+	// However, it seems we get better performance when we don't recognize, and just let
+	// reflection handle it.
+	//
+	// Reasoning is as below:
+	// typeswitch is a binary search with a branch to a code-point.
+	// getdecfn is a binary search with a call to a function pointer.
+	//
+	// both are about the same.
+	//
+	// so: why prefer typeswitch?
+	//
+	// is recognized does the following:
+	// - lookup rtid
+	// - check if in sorted list
+	// - calls decode(type switch)
+	// - 1 or 2 binary search to a point in code
+	// - branch there
+	//
+	// vs getdecfn
+	// - lookup rtid
+	// - check in sorted list for a function pointer
+	// - calls it to decode using reflection (optimized)
+
+	// always set xDebug = false before releasing software
+	xDebug = true
+)
+
+var (
+	oneByteArr    = [1]byte{0}
+	zeroByteSlice = oneByteArr[:0:0]
+)
+
+var refBitset bitset32
+
+var pool pooler
+
+func init() {
+	pool.init()
+
+	refBitset.set(byte(reflect.Map))
+	refBitset.set(byte(reflect.Ptr))
+	refBitset.set(byte(reflect.Func))
+	refBitset.set(byte(reflect.Chan))
+}
+
+// type findCodecFnMode uint8
+
+// const (
+// findCodecFnModeMap findCodecFnMode = iota
+// findCodecFnModeBinarySearch
+// findCodecFnModeLinearSearch
+// )
+
+type charEncoding uint8
+
+const (
+	c_RAW charEncoding = iota
+	c_UTF8
+	c_UTF16LE
+	c_UTF16BE
+	c_UTF32LE
+	c_UTF32BE
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+	valueTypeUnset valueType = iota
+	valueTypeNil
+	valueTypeInt
+	valueTypeUint
+	valueTypeFloat
+	valueTypeBool
+	valueTypeString
+	valueTypeSymbol
+	valueTypeBytes
+	valueTypeMap
+	valueTypeArray
+	valueTypeTimestamp
+	valueTypeExt
+
+	// valueTypeInvalid = 0xff
+)
+
+func (x valueType) String() string {
+	switch x {
+	case valueTypeNil:
+		return "Nil"
+	case valueTypeInt:
+		return "Int"
+	case valueTypeUint:
+		return "Uint"
+	case valueTypeFloat:
+		return "Float"
+	case valueTypeBool:
+		return "Bool"
+	case valueTypeString:
+		return "String"
+	case valueTypeSymbol:
+		return "Symbol"
+	case valueTypeBytes:
+		return "Bytes"
+	case valueTypeMap:
+		return "Map"
+	case valueTypeArray:
+		return "Array"
+	case valueTypeTimestamp:
+		return "Timestamp"
+	case valueTypeExt:
+		return "Ext"
+	}
+	return strconv.FormatInt(int64(x), 10)
+}
+
+type seqType uint8
+
+const (
+	_ seqType = iota
+	seqTypeArray
+	seqTypeSlice
+	seqTypeChan
+)
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already do these.
+type containerState uint8
+
+const (
+	_ containerState = iota
+
+	containerMapStart // slot left open, since Driver method already covers it
+	containerMapKey
+	containerMapValue
+	containerMapEnd
+	containerArrayStart // slot left open, since Driver methods already cover it
+	containerArrayElem
+	containerArrayEnd
+)
+
+// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
+type sfiIdx struct {
+	name  string
+	index int
+}
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// Anecdotally, we believe most types have <= 12 fields.
+// Java's PMD rules set TooManyFields threshold to 15.
+const typeInfoLoadArrayLen = 12 + +type typeInfoLoad struct { + fNames []string + encNames []string + etypes []uintptr + sfis []*structFieldInfo +} + +type typeInfoLoadArray struct { + fNames [typeInfoLoadArrayLen]string + encNames [typeInfoLoadArrayLen]string + etypes [typeInfoLoadArrayLen]uintptr + sfis [typeInfoLoadArrayLen]*structFieldInfo + sfiidx [typeInfoLoadArrayLen]sfiIdx +} + +// type containerStateRecv interface { +// sendContainerState(containerState) +// } + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +// type byteAccepter func(byte) bool + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) + +var immutableKindsSet = [32]bool{ + // reflect.Invalid: , + reflect.Bool: true, + reflect.Int: true, + reflect.Int8: true, + reflect.Int16: true, + reflect.Int32: true, + reflect.Int64: true, + reflect.Uint: true, + reflect.Uint8: true, + reflect.Uint16: true, + reflect.Uint32: true, + reflect.Uint64: true, + reflect.Uintptr: true, + reflect.Float32: true, + reflect.Float64: true, + reflect.Complex64: true, + reflect.Complex128: true, + // reflect.Array + // reflect.Chan + // reflect.Func: true, + // reflect.Interface + // reflect.Map + // reflect.Ptr + // reflect.Slice + reflect.String: true, + // reflect.Struct + // reflect.UnsafePointer +} + +var recognizedRtids []uintptr +var recognizedRtidPtrs []uintptr +var recognizedRtidOrPtrs []uintptr + +func init() { + if !useLookupRecognizedTypes { + return + } + for _, v := range [...]interface{}{ + float32(0), + float64(0), + uintptr(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), 
+ uintptr(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + bool(false), + string(""), + Raw{}, + []byte(nil), + } { + rt := reflect.TypeOf(v) + recognizedRtids = append(recognizedRtids, rt2id(rt)) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(rt))) + } +} + +func containsU(s []uintptr, v uintptr) bool { + // return false // TODO: REMOVE + h, i, j := 0, 0, len(s) + for i < j { + h = i + (j-i)/2 + if s[h] < v { + i = h + 1 + } else { + j = h + } + } + if i < len(s) && s[i] == v { + return true + } + return false +} + +func isRecognizedRtid(rtid uintptr) bool { + return containsU(recognizedRtids, rtid) +} + +func isRecognizedRtidPtr(rtid uintptr) bool { + return containsU(recognizedRtidPtrs, rtid) +} + +func isRecognizedRtidOrPtr(rtid uintptr) bool { + return containsU(recognizedRtidOrPtrs, rtid) +} + +// Selfer defines methods by which a value can encode or decode itself. +// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. +// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +type MapBySlice interface { + MapBySlice() +}
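+ +// For illustration only: a minimal sketch of how user code could opt into +// MapBySlice semantics. PairList below is a hypothetical user-defined type, +// not part of this package. Because it implements MapBySlice, handles will +// encode it as a map of alternating key-value pairs, in slice order: +// +// type PairList []interface{} +// +// func (PairList) MapBySlice() {} +// +// // PairList{"name", "x", "age", 7} encodes like map{"name": "x", "age": 7}, +// // but with a guaranteed key order in the stream.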
+// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + extHandle + EncodeOptions + DecodeOptions + noBuiltInTypeChecker +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first-time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + getBasicHandle() *BasicHandle + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + IsBuiltinType(rtid uintptr) bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and store the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64. + // + // Note: v *may* be a pointer to the extension type, if the extension type was a struct or array. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. +type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. +type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type setExtWrapper struct { + b BytesExt + i InterfaceExt +} + +func (x *setExtWrapper) WriteExt(v interface{}) []byte { + if x.b == nil { + panic("BytesExt.WriteExt is not supported") + } + return x.b.WriteExt(v) +} + +func (x *setExtWrapper) ReadExt(v interface{}, bs []byte) { + if x.b == nil { + panic("BytesExt.ReadExt is not supported") + } + x.b.ReadExt(v, bs) +} + +func (x *setExtWrapper) ConvertExt(v interface{}) interface{} { + if x.i == nil { + panic("InterfaceExt.ConvertExt is not supported") + } + return x.i.ConvertExt(v) +} + +func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { + if x.i == nil { + panic("InterfaceExt.UpdateExt is not supported") + } + x.i.UpdateExt(dest, v) +} + +type binaryEncodingType struct{} + +func (_ binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (_ textEncodingType) isBinary() bool { return false }
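+ +// For illustration of the BytesExt/InterfaceExt contracts above, a minimal +// sketch (userID and uidExt are hypothetical, not part of this package): +// uidExt converts a userID to/from a plain uint64, which the format then +// (de)serializes natively. It would be registered on a handle that supports +// interface extensions, e.g. via SetInterfaceExt. +// +// type userID uint64 +// +// type uidExt struct{} +// +// func (uidExt) ConvertExt(v interface{}) interface{} { return uint64(v.(userID)) } +// +// func (uidExt) UpdateExt(dst interface{}, src interface{}) { +// *(dst.(*userID)) = userID(src.(uint64)) +// }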
+// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. + +type noBuiltInTypeChecker struct{} + +func (_ noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } + +type noBuiltInTypes struct{ noBuiltInTypeChecker } + +func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (_ noStreamingCodec) CheckBreak() bool { return false } +// func (_ noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (_ noElemSeparators) hasElemSeparators() (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. + w encWriter +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag uint64 + ext Ext +} + +type extHandle []extTypeTagFn + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// AddExt registers an encode and decode function for a reflect.Type. +// AddExt internally calls SetExt. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +func (o *extHandle) AddExt( + rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error, +) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// DEPRECATED: Use SetBytesExt or SetInterfaceExt on the Handle instead. +// +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To deregister an ext, call SetExt with a nil Ext. +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.SetExt: takes a named type, not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + rtid := rt2id(rt) + for i := range *o { + v := &(*o)[i] // take the address, so the update below is visible in the slice + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + + if *o == nil { + *o = make([]extTypeTagFn, 0, 4) + } + *o = append(*o, extTypeTagFn{rtid, rt, tag, ext}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { + var v *extTypeTagFn + for i := range o { + v = &o[i] + if v.tag == tag { + return v + } + } + return nil +}
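+ +// For illustration of the SetExt constraint above (Celsius is a hypothetical +// named type, and h/ext stand for any handle embedding BasicHandle and any Ext +// implementation; none of these are part of this package): named non-pointer, +// non-interface types are accepted; pointer, interface and unnamed types are +// rejected. +// +// type Celsius float64 +// +// h.SetExt(reflect.TypeOf(Celsius(0)), 1, ext) // ok: named type +// h.SetExt(reflect.TypeOf((*Celsius)(nil)), 1, ext) // error: pointer type (PkgPath is "") +// h.SetExt(reflect.TypeOf([]int(nil)), 1, ext) // error: unnamed type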
+const maxLevelsEmbedding = 16 + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// field returns the field of the struct, as described by si. +// If an intermediate embedded pointer is nil and update is false, +// it returns an invalid (zero) reflect.Value. +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { + v, _ = si.field(v, update) + return v +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + // if fname == "" { + // panic(noFieldNameToStructFieldInfoErr) + // } + si := structFieldInfo{ + encName: fname, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + if s == "omitempty" { + si.omitEmpty = true + } else if s == "toarray" { + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + // defer func() { fmt.Printf(">>>> found in cache2? %v\n", valid) }() + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. + var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +}
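+ +// For illustration of the tag parsing above (T is a hypothetical user type, +// not part of this package). The first tag element renames the field; later +// elements toggle omitempty/toarray; "-" (handled during field collection) +// skips the field; and the special _struct field configures the whole struct: +// +// type T struct { +// _struct bool `codec:",omitempty"` // implies omitempty for all fields +// Name string `codec:"name"` // encoded with key "name" +// Age int `codec:"age,omitempty"` // omitted when 0 +// Skip int `codec:"-"` // never encoded +// } +// +// With `codec:",toarray"` on the _struct field, the struct is instead encoded +// as an array of values, in field order.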
+// typeInfo keeps information about each type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built-in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + + // baseId gives the type id of the base reflect.Type, after dereferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + anyOmitEmpty bool + + mbs bool // base type (T or *T) is a MapBySlice + + bm bool // base type (T or *T) is a binaryMarshaler + bunm bool // base type (T or *T) is a binaryUnmarshaler + bmIndir int8 // number of indirections to get to binaryMarshaler type + bunmIndir int8 // number of indirections to get to binaryUnmarshaler type + + tm bool // base type (T or *T) is a textMarshaler + tunm bool // base type (T or *T) is a textUnmarshaler + tmIndir int8 // number of indirections to get to textMarshaler type + tunmIndir int8 // number of indirections to get to textUnmarshaler type + + jm bool // base type (T or *T) is a jsonMarshaler + junm bool // base type (T or *T) is a jsonUnmarshaler + jmIndir int8 // number of indirections to get to jsonMarshaler type + junmIndir int8 // number of indirections to get to jsonUnmarshaler type + + cs bool // base type (T or *T) is a Selfer + csIndir int8 // number of indirections to get to Selfer type + + toArray bool // whether this (struct) type should be encoded as an array +} + +// indexForEncNameBinarySearchThreshold defines the length beyond which we do a binary search +// instead of a linear search. +// From our testing, linear search seems faster than binary search up to 16-field structs. +// However, we set it to 8, similar to what Python does for hash tables. +const indexForEncNameBinarySearchThreshold = 8 + +func (ti *typeInfo) indexForEncName(name string) int { + // NOTE: name may be a stringView, so don't pass it to another function. + //tisfi := ti.sfi + sfilen := len(ti.sfi) + if sfilen < indexForEncNameBinarySearchThreshold { + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + return -1 + } + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + return -1 +} + +type rtid2ti struct { + rtid uintptr + ti *typeInfo +} + +// TypeInfos caches typeInfo for each type on first inspection. +// +// It is configured with a set of tag keys, which are used to get +// configuration for the type. +type TypeInfos struct { + infos atomicTypeInfoSlice // formerly map[uintptr]*typeInfo, now *[]rtid2ti + mu sync.Mutex + tags []string +} + +// NewTypeInfos creates a TypeInfos given a set of struct tag keys. +// +// This allows users to customize the struct tag keys which contain configuration +// of their types.
+func NewTypeInfos(tags []string) *TypeInfos { + return &TypeInfos{tags: tags} +} + +func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { + // check for tags: codec, json, in that order. + // this allows seamless support for many configured structs. + for _, x := range x.tags { + s = t.Get(x) + if s != "" { + return s + } + } + return +} + +func (x *TypeInfos) find(sp *[]rtid2ti, rtid uintptr) (idx int, ti *typeInfo) { + // binary search. adapted from sort/search.go. + // if sp == nil { + // return -1, nil + // } + s := *sp + h, i, j := 0, 0, len(s) + for i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < len(s) && s[i].rtid == rtid { + return i, s[i].ti + } + return i, nil +} + +func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + sp := x.infos.load() + var idx int + if sp != nil { + idx, pti = x.find(sp, rtid) + if pti != nil { + return + } + } + + // do not hold lock while computing this. + // it may lead to duplication, but that's ok. + ti := typeInfo{rt: rt, rtid: rtid} + // ti.rv0 = reflect.Zero(rt) + + ti.numMeth = uint16(rt.NumMethod()) + var ok bool + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.bm, ti.bmIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.bunm, ti.bunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { + ti.tm, ti.tmIndir = true, indir + } + if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { + ti.tunm, ti.tunmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { + ti.jm, ti.jmIndir = true, indir + } + if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { + ti.junm, ti.junmIndir = true, indir + } + if ok, indir = implementsIntf(rt, selferTyp); ok { + ti.cs, ti.csIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = rt2id(pt) + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var omitEmpty bool + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + ti.toArray = siInfo.toArray + omitEmpty = siInfo.omitEmpty + } + pp, pi := pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.baseId + vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + ti.sfip, ti.sfi, ti.anyOmitEmpty = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + pp.Put(pi) + } + // sfi = sfip + + var vs []rtid2ti + x.mu.Lock() + sp = x.infos.load() + if sp == nil { + pti = &ti + vs = []rtid2ti{{rtid, pti}} + x.infos.store(&vs) + } else { + idx, pti = x.find(sp, rtid) + if pti == nil { + s := *sp + pti = &ti + vs = make([]rtid2ti, len(s)+1) + copy(vs, s[:idx]) + vs[idx] = rtid2ti{rtid, pti} + copy(vs[idx+1:], s[idx:]) + x.infos.store(&vs) + } + } + x.mu.Unlock() + return +} + +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []uint16, pv *typeInfoLoad, +) { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. 
+ // + // Note: we consciously use slices, not a map, to simulate a set. + // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + flen := rt.NumField() + if flen > (1<<16 - 1) { + panic(fmt.Errorf("codec: types with more than %v fields are not supported - has %v fields", (1<<16 - 1), flen)) + } +LOOP: + for j, jlen := uint16(0), uint16(flen); j < jlen; j++ { + f := rt.Field(int(j)) + fkind := f.Type.Kind() + // skip if a func type, or is unexported, or structTag value == "-" + switch fkind { + case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: + continue LOOP + } + + if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded + continue + } + stag := x.structTag(f.Tag) + if stag == "-" { + continue + } + var si *structFieldInfo + // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it. + if f.Anonymous && fkind != reflect.Interface { + doInline := stag == "" + if !doInline { + si = parseStructFieldInfo("", stag) + doInline = si.encName == "" + } + if doInline { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + // if etypes contains this, don't call rget again (as the fields are already seen here) + ftid := rt2id(ft) + processIt := true + for _, k := range pv.etypes { + if k == ftid { + processIt = false + break + } + } + if processIt { + pv.etypes = append(pv.etypes, ftid) + indexstack2 := make([]uint16, len(indexstack)+1) + copy(indexstack2, indexstack) + indexstack2[len(indexstack)] = j + x.rget(ft, ftid, omitEmpty, indexstack2, pv) + } + continue + } + } + } + + if f.PkgPath != "" { // unexported + continue + } + + if f.Name == "" { + panic(noFieldNameToStructFieldInfoErr) + } + + pv.fNames = append(pv.fNames, f.Name) + + if si == nil { + si = parseStructFieldInfo(f.Name, stag) + } else if si.encName == "" { + si.encName = f.Name + } + si.fieldName = f.Name + + pv.encNames = append(pv.encNames, si.encName) + + if len(indexstack) > maxLevelsEmbedding-1 { + panic(fmt.Errorf("codec: only supports up to %v depth of embedding - type has %v depth", maxLevelsEmbedding-1, len(indexstack))) + } + si.nis = uint8(len(indexstack)) + 1 + copy(si.is[:], indexstack) + si.is[len(indexstack)] = j + + if omitEmpty { + si.omitEmpty = true + } + pv.sfis = append(pv.sfis, si) + } +} + +// rgetResolveSFI resolves the struct field info obtained from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo, anyOmitEmpty bool) { + var n int + for i, v := range x { + xn := v.encName // TODO: fieldName or encName? use encName for now. + var found bool + for j, k := range pv { + if k.name == xn { + // one of them must be reset to nil, and the index updated appropriately to the other one + if v.nis == x[k.index].nis { + } else if v.nis < x[k.index].nis { + pv[j].index = i + if x[k.index] != nil { + x[k.index] = nil + n++ + } + } else { + if x[i] != nil { + x[i] = nil + n++ + } + } + found = true + break + } + } + if !found { + pv = append(pv, sfiIdx{xn, i}) + } + } + + // remove all the nils + y = make([]*structFieldInfo, len(x)-n) + n = 0 + for _, v := range x { + if v == nil { + continue + } + if !anyOmitEmpty && v.omitEmpty { + anyOmitEmpty = true + } + y[n] = v + n++ + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + return +}
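+ +// For illustration of the resolution rules above (Inner/Outer are hypothetical +// types, not part of this package): as with Go's own selector rules, the field +// at the shallowest embedding depth wins when encode names collide. +// +// type Inner struct{ ID int } +// +// type Outer struct { +// Inner // Inner.ID sits at embedding depth 2 (nis=2) +// ID int // depth 1 (nis=1): shadows Inner.ID +// } +// +// rgetResolveSFI keeps only Outer.ID; Inner.ID is dropped because a shallower +// field with the same encode name ("ID") exists.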
+func xprintf(format string, a ...interface{}) { + if xDebug { + fmt.Fprintf(os.Stderr, format, a...) + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + // if false && xDebug { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + // } + panicValToErr(x, err) + } + } +} + +func panicToErrs2(err1, err2 *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + panicValToErr(x, err1) + panicValToErr(x, err2) + } + } +} + +// func doPanic(tag string, format string, params ...interface{}) { +// params2 := make([]interface{}, len(params)+1) +// params2[0] = tag +// copy(params2[1:], params) +// panic(fmt.Errorf("%s: "+format, params2...)) +// } + +func isImmutableKind(k reflect.Kind) (v bool) { + return immutableKindsSet[k] + // return false || + // k == reflect.Int || + // k == reflect.Int8 || + // k == reflect.Int16 || + // k == reflect.Int32 || + // k == reflect.Int64 || + // k == reflect.Uint || + // k == reflect.Uint8 || + // k == reflect.Uint16 || + // k == reflect.Uint32 || + // k == reflect.Uint64 || + // k == reflect.Uintptr || + // k == reflect.Float32 || + // k == reflect.Float64 || + // k == reflect.Bool || + // k == reflect.String +} + +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addr bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations once, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. +type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) +} + +type codecRtidFn struct { + rtid uintptr + fn codecFn +} + +type codecFner struct { + hh Handle + h *BasicHandle + cs [arrayCacheLen]*[arrayCacheLen]codecRtidFn + s []*[arrayCacheLen]codecRtidFn + sn uint32 + be bool + js bool + cf [arrayCacheLen]codecRtidFn +} + +func (c *codecFner) reset(hh Handle) { + c.hh = hh + c.h = hh.getBasicHandle() + _, c.js = hh.(*JsonHandle) + c.be = hh.isBinary() +} + +func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + var j uint32 + var sn uint32 = c.sn + if sn == 0 { + c.s = c.cs[:1] + c.s[0] = &c.cf + c.cf[0].rtid = rtid + fn = &(c.cf[0].fn) + c.sn = 1 + } else { + LOOP1: + for _, x := range c.s { + for i := range x { + if j == sn { + break LOOP1 + } + if x[i].rtid == rtid { + fn = &(x[i].fn) + return + } + j++ + } + } + sx, sy := sn/arrayCacheLen, sn%arrayCacheLen + if sy == 0 { + c.s = append(c.s, &[arrayCacheLen]codecRtidFn{}) + } + c.s[sx][sy].rtid = rtid + fn = &(c.s[sx][sy].fn) + c.sn++ + } + + ti := c.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.ti = ti + + if checkCodecSelfer && ti.cs { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fn.i.addr = true + } else if c.hh.IsBuiltinType(rtid) { + fn.fe = (*Encoder).builtin + fn.fd = (*Decoder).builtin + fn.i.addr = true + } else if xfFn := c.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fn.i.addr = true + } else if supportMarshalInterfaces && c.be && ti.bm && ti.bunm { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + } else if supportMarshalInterfaces && !c.be && c.js && ti.jm && ti.junm { + // If JSON, we should check jsonMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + } else if supportMarshalInterfaces && !c.be && ti.tm && ti.tunm { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + } else { + rk := rt.Kind() + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fn.i.addr = true + } + } else { + // use mapping for underlying type, if present + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fn.i.addr = true + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt8 + case 
reflect.Int16: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint64 + // case reflect.Ptr: + // fn.fd = (*Decoder).kPtr + case reflect.Uintptr: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addr = false + rt2 := reflect.SliceOf(rt.Elem()) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + // println(">>>>>> decoding an array ... ") + d.cf.get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + // println(">>>>>> decoding an array ... DONE") + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty { + fn.fe = (*Encoder).kStruct + } else { + fn.fe = (*Encoder).kStructNoOmitempty + } + fn.fd = (*Decoder).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + // case reflect.Ptr: + // fn.fe = (*Encoder).kPtr + // case reflect.Interface: + // fn.fe = (*Encoder).kInterface + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = (*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + default: + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr + } + } + } + + return +} + +// ---- + +// these functions must be inlinable, and not call anybody +type checkOverflow struct{} + +func (_ checkOverflow) Float32(f float64) (overflow bool) { + if f < 0 { + f = -f + } + return math.MaxFloat32 < f && f <= math.MaxFloat64 +} + +func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) { + if bitsize == 0 || bitsize >= 64 || v == 0 { + return + } + if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc { + overflow = true + } + return +} + +func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) { + //e.g. 
-128 to 127 for int8 + pos := (v >> 63) == 0 + ui2 := v & 0x7fffffffffffffff + if pos { + if ui2 > math.MaxInt64 { + overflow = true + return + } + } else { + if ui2 > math.MaxInt64-1 { + overflow = true + return + } + } + i = int64(v) + return +} + +// ------------------ SORT ----------------- + +func isNaN(f float64) bool { return f != f } + +// ----------------------- + +type intSlice []int64 +type uintSlice []uint64 +type uintptrSlice []uintptr +type floatSlice []float64 +type boolSlice []bool +type stringSlice []string +type bytesSlice [][]byte + +func (p intSlice) Len() int { return len(p) } +func (p intSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p intSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintSlice) Len() int { return len(p) } +func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintptrSlice) Len() int { return len(p) } +func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintptrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatSlice) Len() int { return len(p) } +func (p floatSlice) Less(i, j int) bool { + return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) +} +func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringSlice) Len() int { return len(p) } +func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p stringSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesSlice) Len() int { return len(p) } +func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 } +func (p bytesSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolSlice) Len() int { return len(p) } +func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] } +func (p boolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// --------------------- + +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) +} +func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } +func (p boolRvSlice) Swap(i, j int) { p[i], p[j] 
= p[j], p[i] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. + +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset256) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +type bitset128 [16]byte + +func (x *bitset128) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset128) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset128) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +type bitset32 [4]byte + +func (x *bitset32) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset32) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset32) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// ------------ + +type pooler struct { + // for stringRV + strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool + // for the decNaked + dn sync.Pool + tiload sync.Pool +} + +func (p *pooler) init() { + p.strRv8.New = func() interface{} { return new([8]stringRv) } + p.strRv16.New = func() interface{} { return new([16]stringRv) } + p.strRv32.New = func() interface{} { return new([32]stringRv) } + p.strRv64.New = func() interface{} { return new([64]stringRv) } + p.strRv128.New = func() interface{} { return new([128]stringRv) } + p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } +} + +func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) { + return &p.strRv8, p.strRv8.Get() +} +func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) { + return &p.strRv16, p.strRv16.Get() +} +func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) { + return &p.strRv32, p.strRv32.Get() +} +func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) { + 
return &p.strRv64, p.strRv64.Get() +} +func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) { + return &p.strRv128, p.strRv128.Get() +} +func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { + return &p.dn, p.dn.Get() +} +func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { + return &p.tiload, p.tiload.Get() +} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go new file mode 100644 index 00000000000..eb18e2ccaee --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -0,0 +1,221 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +func panicValToErr(panicVal interface{}, err *error) { + if panicVal == nil { + return + } + // case nil + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + return +} + +func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return hIsEmptyValue(v.Elem(), deref, checkStruct) + } else { + return v.IsNil() + } + case reflect.Struct: + if !checkStruct { + return false + } + // return true if all fields are empty. else return false. + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !hIsEmptyValue(v.Field(i), deref, checkStruct) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + return hIsEmptyValue(v, deref, checkStruct) +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? 
+ if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} + +// validate that this function is correct ... +// culled from OGRE (Object-Oriented Graphics Rendering Engine) +// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) +func halfFloatToFloatBits(yy uint16) (d uint32) { + y := uint32(yy) + s := (y >> 15) & 0x01 + e := (y >> 10) & 0x1f + m := y & 0x03ff + + if e == 0 { + if m == 0 { // plus or minus 0 + return s << 31 + } else { // Denormalized number -- renormalize it + for (m & 0x00000400) == 0 { + m <<= 1 + e -= 1 + } + e += 1 + const zz uint32 = 0x0400 + m &= ^zz + } + } else if e == 31 { + if m == 0 { // Inf + return (s << 31) | 0x7f800000 + } else { // NaN + return (s << 31) | 0x7f800000 | (m << 13) + } + } + e = e + (127 - 15) + m = m << 13 + return (s << 31) | (e << 23) | m +} + +// growCap returns a new capacity for a slice, given the following: +// - oldCap: current capacity +// - unit: in-memory size of an element +// - num: number of elements to add +func growCap(oldCap, unit, num int) (newCap int) { + // appendslice logic (if cap < 1024, *2, else *1.25): + // leads to many copy calls, especially when copying bytes. + // bytes.Buffer model (2*cap + n): much better for bytes. + // smarter way is to take the byte-size of the appended element(type) into account + + // maintain 3 thresholds: + // t1: if cap <= t1, newcap = 2x + // t2: if cap <= t2, newcap = 1.75x + // t3: if cap <= t3, newcap = 1.5x + // else newcap = 1.25x + // + // t1, t2, t3 >= 1024 always. + // i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same) + // + // With this, appending for bytes increases by: + // 100% up to 4K + // 75% up to 8K + // 50% up to 16K + // 25% beyond that + + // unit can be 0 e.g. for struct{}{}; handle that appropriately + var t1, t2, t3 int // thresholds + if unit <= 1 { + t1, t2, t3 = 4*1024, 8*1024, 16*1024 + } else if unit < 16 { + t3 = 16 / unit * 1024 + t1 = t3 * 1 / 4 + t2 = t3 * 2 / 4 + } else { + t1, t2, t3 = 1024, 1024, 1024 + } + + var x int // temporary variable + + // x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively + if oldCap <= t1 { // [0,t1] + x = 8 + } else if oldCap > t3 { // (t3,infinity] + x = 5 + } else if oldCap <= t2 { // (t1,t2] + x = 7 + } else { // (t2,t3] + x = 6 + } + newCap = x * oldCap / 4 + + if num > 0 { + newCap += num + } + + // ensure newCap is a multiple of 64 (if it is > 64) or 16. + if newCap > 64 { + if x = newCap % 64; x != 0 { + x = newCap / 64 + newCap = 64 * (x + 1) + } + } else { + if x = newCap % 16; x != 0 { + x = newCap / 16 + newCap = 16 * (x + 1) + } + } + return +}
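+ +// For illustration, a few sample values assuming unit == 1 (byte slices), so +// t1, t2, t3 = 4K, 8K, 16K as above: +// +// growCap(1024, 1, 0) == 2048 // 1024 <= t1: x=8, 8*1024/4 = 2048 (2x; already a multiple of 64) +// growCap(6000, 1, 0) == 10560 // 6000 <= t2: x=7, 7*6000/4 = 10500, rounded up to a multiple of 64 +// growCap(20000, 1, 0) == 25024 // 20000 > t3: x=5, 5*20000/4 = 25000, rounded up to a multiple of 64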
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go new file mode 100644 index 00000000000..ef5b73f49d2 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -0,0 +1,160 @@ +// +build !go1.7 safe appengine + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" +) + +const safeMode = true + +// stringView returns a view of the []byte as a string. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4StringView(v) at point where done with view. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func bytesView(v string) []byte { + return []byte(v) +} + +func definitelyNil(v interface{}) bool { + // this is a best-effort option. + // We just return false, so we don't unnecessarily incur the cost of reflection this early. + return false + // rv := reflect.ValueOf(v) + // switch rv.Kind() { + // case reflect.Invalid: + // return true + // case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: + // return rv.IsNil() + // default: + // return false + // } +} + +// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// // +// // Usage: call this at point where done with the bytes view. +// func keepAlive4BytesView(v string) {} + +// // keepAlive4StringView maintains a reference to the input parameter for stringView. +// // +// // Usage: call this at point where done with the string view. +// func keepAlive4StringView(v []byte) {} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +func rv2rtid(rv reflect.Value) uintptr { + return reflect.ValueOf(rv.Type()).Pointer() +} + +// -------------------------- +// type ptrToRvMap struct{} + +// func (_ *ptrToRvMap) init() {} +// func (_ *ptrToRvMap) get(i interface{}) reflect.Value { +// return reflect.ValueOf(i).Elem() +// } + +// -------------------------- +type atomicTypeInfoSlice struct { + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() *[]rtid2ti { + i := x.v.Load() + if i == nil { + return nil + } + return i.(*[]rtid2ti) +} + +func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat(true)) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat(false)) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(64)) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(uintBitsize)) +}
+func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(64)) +} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go new file mode 100644 index 00000000000..e2c4afeceea --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -0,0 +1,431 @@ +// +build !safe +// +build !appengine +// +build go1.7 + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +// This file has unsafe variants of some helper methods. +// NOTE: See helper_not_unsafe.go for the usage information. + +// var zeroRTv [4]uintptr + +const safeMode = false +const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go + +type unsafeString struct { + Data uintptr + Len int +} + +type unsafeSlice struct { + Data uintptr + Len int + Cap int +} + +type unsafeIntf struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +type unsafeReflectValue struct { + typ unsafe.Pointer + ptr unsafe.Pointer + flag uintptr +} + +func stringView(v []byte) string { + if len(v) == 0 { + return "" + } + + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + sx := unsafeString{bx.Data, bx.Len} + return *(*string)(unsafe.Pointer(&sx)) +} + +func bytesView(v string) []byte { + if len(v) == 0 { + return zeroByteSlice + } + + sx := (*unsafeString)(unsafe.Pointer(&v)) + bx := unsafeSlice{sx.Data, sx.Len, sx.Len} + return *(*[]byte)(unsafe.Pointer(&bx)) +} + +func definitelyNil(v interface{}) bool { + // There is no global way of checking if an interface is nil. + // For true references (map, ptr, func, chan), you can just look + // at the word of the interface. However, for slices, you have to dereference + // the word, and get a pointer to the 3-word interface value. + + // var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) + // var word unsafe.Pointer = ui.word + // // fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) + // return word == nil // || *((*unsafe.Pointer)(word)) == nil + return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil +} + +// func keepAlive4BytesView(v string) { +// runtime.KeepAlive(v) +// } + +// func keepAlive4StringView(v []byte) { +// runtime.KeepAlive(v) +// } + +// TODO: consider a more generally-known optimization for reflect.Value ==> Interface +// +// Currently, we use this fragile method that taps into implementation details from +// the source go stdlib reflect/value.go, +// and trims the implementation.
+func rv2i(rv reflect.Value) interface{} { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir + var ptr unsafe.Pointer + // kk := reflect.Kind(urv.flag & (1<<5 - 1)) + // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { + if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { + ptr = *(*unsafe.Pointer)(urv.ptr) + } else { + ptr = urv.ptr + } + return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) + // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) + // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +} + +func rt2id(rt reflect.Type) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +} + +func rv2rtid(rv reflect.Value) uintptr { + return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) +} + +// func rv0t(rt reflect.Type) reflect.Value { +// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) +// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr +// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} +// return *(*reflect.Value)(unsafe.Pointer(&uv}) +// } + +// -------------------------- +type atomicTypeInfoSlice struct { + v unsafe.Pointer +} + +func (x *atomicTypeInfoSlice) load() *[]rtid2ti { + return (*[]rtid2ti)(atomic.LoadPointer(&x.v)) +} + +func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { + atomic.StorePointer(&x.v, unsafe.Pointer(p)) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // if urv.flag&unsafeFlagIndir != 0 { + // urv.ptr = *(*unsafe.Pointer)(urv.ptr) + // } + *(*[]byte)(urv.ptr) = d.rawBytes() +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*string)(urv.ptr) = d.d.DecodeString() +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*bool)(urv.ptr) = d.d.DecodeBool() +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true)) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float64)(urv.ptr) = d.d.DecodeFloat(false) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int8)(urv.ptr) = int8(d.d.DecodeInt(8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int16)(urv.ptr) = int16(d.d.DecodeInt(16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int32)(urv.ptr) = int32(d.d.DecodeInt(32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int64)(urv.ptr) = d.d.DecodeInt(64) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint)(urv.ptr) = 
uint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint64)(urv.ptr) = d.d.DecodeUint(64) +} + +// ------------ + +// func rt2id(rt reflect.Type) uintptr { +// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +// // var i interface{} = rt +// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // non-reference type: already indir +// // reference type: depend on flagIndir property ('cos maybe was double-referenced) +// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) +// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (rvk == reflect.Chan || +// // rvk == reflect.Func || +// // rvk == reflect.Interface || +// // rvk == reflect.Map || +// // rvk == reflect.Ptr || +// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } +// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } +// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// const ( +// unsafeRvFlagKindMask = 1<<5 - 1 +// unsafeRvKindDirectIface = 1 << 5 +// unsafeRvFlagIndir = 1 << 7 +// unsafeRvFlagAddr = 1 << 8 +// unsafeRvFlagMethod = 1 << 9 + +// _USE_RV_INTERFACE bool = false +// _UNSAFE_RV_DEBUG = true +// ) + +// type unsafeRtype struct { +// _ [2]uintptr +// _ uint32 +// _ uint8 +// _ uint8 +// _ uint8 +// kind uint8 +// _ [2]uintptr +// _ int32 +// } + +// func _rv2i(rv reflect.Value) interface{} { +// // Note: From use, +// // - it's never an interface +// // - the only calls here are for ifaceIndir types. +// // (though that conditional is wrong) +// // To know for sure, we need the value of t.kind (which is not exposed). +// // +// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) +// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string +// // - Type Direct, Value indirect: ==> map??? 
+// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map +// // +// // TRANSLATES TO: +// // if typeIndirect { } else if valueIndirect { } else { } +// // +// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. + +// if _USE_RV_INTERFACE { +// return rv.Interface() +// } +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + +// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS flag method or interface: delegating to rv.Interface()") +// // return rv.Interface() +// // } + +// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS Interface: delegate to rv.Interface") +// // return rv.Interface() +// // } +// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { +// // if urv.flag&unsafeRvFlagAddr == 0 { +// // println("***** IS ifaceIndir typ") +// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} +// // // return *(*interface{})(unsafe.Pointer(&ui)) +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // } else if urv.flag&unsafeRvFlagIndir != 0 { +// // println("***** IS flagindir") +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } else { +// // println("***** NOT flagindir") +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // println("***** default: delegate to rv.Interface") + +// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) +// if _UNSAFE_RV_DEBUG { +// fmt.Printf(">>>> start: %v: ", rv.Type()) +// fmt.Printf("%v - %v\n", *urv, *urt) +// } +// if urt.kind&unsafeRvKindDirectIface == 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) +// } +// // println("***** IS ifaceIndir typ") +// // if true || urv.flag&unsafeRvFlagAddr == 0 { +// // // println(" ***** IS NOT addr") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) +// } +// // println("***** IS flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } else { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) +// } +// // println("***** NOT flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } +// // println("***** default: delegating to rv.Interface()") +// // return rv.Interface() +// } + +// var staticM0 = make(map[string]uint64) +// var staticI0 = (int32)(-5) + +// func staticRv2iTest() { +// i0 := (int32)(-5) +// m0 := make(map[string]uint16) +// m0["1"] = 1 +// for _, i := range []interface{}{ +// (int)(7), +// (uint)(8), +// (int16)(-9), +// (uint16)(19), +// (uintptr)(77), +// (bool)(true), +// float32(-32.7), +// float64(64.9), +// complex(float32(19), 5), +// complex(float64(-32), 7), +// [4]uint64{1, 2, 3, 4}, +// (chan<- int)(nil), // chan, +// rv2i, // func +// io.Writer(ioutil.Discard), +// make(map[string]uint), +// (map[string]uint)(nil), +// staticM0, +// m0, +// &m0, +// i0, +// &i0, +// &staticI0, +// &staticM0, +// []uint32{6, 7, 8}, +// "abc", +// Raw{}, +// RawExt{}, +// &Raw{}, +// &RawExt{}, +// unsafe.Pointer(&i0), +// } { +// i2 := rv2i(reflect.ValueOf(i)) +// eq := reflect.DeepEqual(i, i2) +// 
fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq) +// } +// // os.Exit(0) +// } + +// func init() { +// staticRv2iTest() +// } + +// func rv2i(rv reflect.Value) interface{} { +// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { +// return rv.Interface() +// } +// // var i interface{} +// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// var ui unsafeIntf +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) +// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { +// if urv.flag&unsafeRvFlagAddr != 0 { +// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()") +// return rv.Interface() +// } +// println("****** indirect type/kind") +// ui.word = urv.ptr +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// println("****** unsafe rv flag indir") +// ui.word = *(*unsafe.Pointer)(urv.ptr) +// } else { +// println("****** default: assign prt to word directly") +// ui.word = urv.ptr +// } +// // ui.word = urv.ptr +// ui.typ = urv.typ +// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) +// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) +// return *(*interface{})(unsafe.Pointer(&ui)) +// // return i +// } diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go new file mode 100644 index 00000000000..a2276070d51 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -0,0 +1,1167 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// By default, this json support uses base64 encoding for bytes, because you cannot +// store and read any arbitrary string in json (only unicode). +// However, the user can configre how to encode/decode bytes. +// +// This library specifically supports UTF-8 for encoding and decoding only. +// +// Note that the library will happily encode/decode things which are not valid +// json e.g. a map[int64]string. We do it for consistency. With valid json, +// we will encode and decode appropriately. +// Users can specify their map type if necessary to force it. +// +// Note: +// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently. +// We implement it here. +// - Also, strconv.ParseXXX for floats and integers +// - only works on strings resulting in unnecessary allocation and []byte-string conversion. +// - it does a lot of redundant checks, because json numbers are simpler that what it supports. +// - We parse numbers (floats and integers) directly here. +// We only delegate parsing floats if it is a hairy float which could cause a loss of precision. +// In that case, we delegate to strconv.ParseFloat. +// +// Note: +// - encode does not beautify. There is no whitespace when encoding. +// - rpc calls which take single integer arguments or write single numeric arguments will need care. + +// Top-level methods of json(End|Dec)Driver (which are implementations of (en|de)cDriver +// MUST not call one-another. 
+ +import ( + "bytes" + "encoding/base64" + "reflect" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +//-------------------------------- + +var jsonLiterals = [...]byte{ + '"', + 't', 'r', 'u', 'e', + '"', + '"', + 'f', 'a', 'l', 's', 'e', + '"', + '"', + 'n', 'u', 'l', 'l', + '"', +} + +const ( + jsonLitTrueQ = 0 + jsonLitTrue = 1 + jsonLitFalseQ = 6 + jsonLitFalse = 7 + jsonLitNullQ = 13 + jsonLitNull = 14 +) + +var ( + // jsonFloat64Pow10 = [...]float64{ + // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + // 1e20, 1e21, 1e22, + // } + + // jsonUint64Pow10 = [...]uint64{ + // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + // } + + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces string + + jsonCharHtmlSafeSet bitset128 + jsonCharSafeSet bitset128 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 + // jsonIsFloatSet bitset256 + + jsonU4Set [256]byte +) + +const ( + // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: + // - If we see first character of null, false or true, + // do not validate subsequent characters. + // - e.g. if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonU4SetErrVal = 128 + + jsonAlwaysReturnInternString = false +) + +func init() { + var bs [jsonSpacesOrTabsLen]byte + for i := 0; i < jsonSpacesOrTabsLen; i++ { + bs[i] = ' ' + } + jsonSpaces = string(bs[:]) + + for i := 0; i < jsonSpacesOrTabsLen; i++ { + bs[i] = '\t' + } + jsonTabs = string(bs[:]) + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + var i byte + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } + for j := range jsonU4Set { + switch i = byte(j); i { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + jsonU4Set[i] = i - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + jsonU4Set[i] = i - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + jsonU4Set[i] = i - 'A' + 10 + default: + jsonU4Set[i] = jsonU4SetErrVal + } + // switch i = byte(j); i { + // case 'e', 'E', '.': + // jsonIsFloatSet.set(i) + // } + } + // jsonU4Set[255] = jsonU4SetErrVal +} + +type jsonEncDriver struct { + e *Encoder + w encWriter + h *JsonHandle + b [64]byte // scratch + bs []byte // scratch + se setExtWrapper + ds string // indent string + dl uint16 // indent level + dt bool // indent using tabs + d bool // indent + c containerState + noBuiltInTypes +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriver) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriver) WriteArrayElem() { + if e.c != 
containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriver) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriver) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriver) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriver) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriver) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriver) writeIndent() { + e.w.writen1('\n') + if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen { + if e.dt { + e.w.writestr(jsonTabs[:x]) + } else { + e.w.writestr(jsonSpaces[:x]) + } + } else { + for i := uint16(0); i < e.dl; i++ { + e.w.writestr(e.ds) + } + } +} + +func (e *jsonEncDriver) EncodeNil() { + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } +} + +func (e *jsonEncDriver) EncodeBool(b bool) { + if e.h.MapKeyAsString && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } + } else { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } + } +} + +func (e *jsonEncDriver) EncodeFloat32(f float32) { + e.encodeFloat(float64(f), 32) +} + +func (e *jsonEncDriver) EncodeFloat64(f float64) { + e.encodeFloat(f, 64) +} + +func (e *jsonEncDriver) encodeFloat(f float64, numbits int) { + var blen int + var x []byte + if e.h.MapKeyAsString && e.c == containerMapKey { + e.b[0] = '"' + x = strconv.AppendFloat(e.b[1:1], f, 'G', -1, numbits) + blen = 1 + len(x) + if jsonIsFloatBytesB2(x) { + e.b[blen] = '"' + blen += 1 + } else { + e.b[blen] = '.' + e.b[blen+1] = '0' + e.b[blen+2] = '"' + blen += 3 + } + } else { + x = strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits) + blen = len(x) + if !jsonIsFloatBytesB2(x) { + e.b[blen] = '.' 
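+ // ('G' formatting may render 2.0 as just "2"; the appended ".0" ensures the value decodes back as a float)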
+ e.b[blen+1] = '0' + blen += 2 + } + } + e.w.writeb(e.b[:blen]) +} + +func (e *jsonEncDriver) EncodeInt(v int64) { + x := e.h.IntegerAsString + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.h.MapKeyAsString && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeUint(v uint64) { + x := e.h.IntegerAsString + if x == 'A' || x == 'L' && v > 1<<53 || (e.h.MapKeyAsString && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeSymbol(v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if c == c_RAW && e.se.i != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + if c == c_RAW { + slen := base64.StdEncoding.EncodedLen(len(v)) + if cap(e.bs) >= slen { + e.bs = e.bs[:slen] + } else { + e.bs = make([]byte, slen) + } + base64.StdEncoding.Encode(e.bs, v) + e.w.writen1('"') + e.w.writeb(e.bs) + e.w.writen1('"') + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.w.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.w + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. + if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + if jsonCharHtmlSafeSet.isset(b) || (e.h.HTMLCharsAsIs && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. 
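+ // (encoding/json's encoder does the same; the runes are emitted as \u2028 / \u2029 escapes)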
+ if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +func (e *jsonEncDriver) atEndOfEncode() { + if e.h.TermWhitespace { + if e.d { + e.w.writen1('\n') + } else { + e.w.writen1(' ') + } + } +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r decReader + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + + fnull bool // found null from appendStringAsBytes + + bstr [8]byte // scratch used for string \UXXX parsing + b [64]byte // scratch, used for parsing strings or numbers + b2 [64]byte // scratch, used only for decodeBytes (after base64) + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + + se setExtWrapper + + // n jsonNum +} + +func jsonIsWS(b byte) bool { + // return b == ' ' || b == '\t' || b == '\r' || b == '\n' + return jsonCharWhitespaceSet.isset(b) +} + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != '{' { + d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != '[' { + d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +func (d *jsonDecDriver) ReadArrayElem() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + const xc uint8 = ',' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = ']' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + const xc uint8 = ',' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = ':' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '}' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) { +// if d.tok == 0 { +// d.tok = d.r.skip(&jsonCharWhitespaceSet) +// } +// if check { +// if d.tok != xc { +// d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) +// } +// d.tok = 0 +// } +// 
d.c = c +// } + +func (d *jsonDecDriver) readLit(length, fromIdx uint8) { + bs := d.r.readx(int(length)) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { + d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) + return + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // TODO: we shouldn't try to see if "null" was here, right? + // only "null" denotes a nil + if d.tok == 'n' { + d.readLit(3, jsonLitNull+1) // ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit(4, jsonLitFalse+1) // alse + // v = false + case 't': + d.readLit(3, jsonLitTrue+1) // rue + v = true + default: + d.d.errorf("json: decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // return false // "unreachable" +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { + bs := d.decNumBytes() + u, err := strconv.ParseUint(stringView(bs), 10, int(bitsize)) + if err != nil { + d.d.errorf("json: decode uint from %s: %v", bs, err) + return + } + return +} + +func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { + bs := d.decNumBytes() + i, err := strconv.ParseInt(stringView(bs), 10, int(bitsize)) + if err != nil { + d.d.errorf("json: decode int from %s: %v", bs, err) + return + } + return +} + +func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + bs := d.decNumBytes() + bitsize := 64 + if chkOverflow32 { + bitsize = 32 + } + f, err := strconv.ParseFloat(stringView(bs), bitsize) + if err != nil { + d.d.errorf("json: decode float from %s: %v", bs, err) + return + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.i != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. 
+ // However, it sets a fnull field to true, so we can check if a null was found. + if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readLit(3, jsonLitNull+1) // ull + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readLit(4, jsonLitFalse+1) // alse + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readLit(3, jsonLitTrue+1) // rue + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + d.bs = d.bs[:len(bs)] + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = len(cs) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + for i, cursor := 0, 0; ; { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = len(cs) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) 
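+ // c is the backslash: the pending literal run has been flushed; advance to the escape character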
+ i++ + c = cs[i] + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + var r rune + var rr uint32 + if len(cs) < i+4 { // may help reduce bounds-checking + d.d.errorf(`json: need at least 4 more bytes for unicode sequence`) + } + // c = cs[i+4] // may help reduce bounds-checking + for j := 1; j < 5; j++ { + c = jsonU4Set[cs[i+j]] + if c == jsonU4SetErrVal { + // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + rr = rr*16 + uint32(c) + } + r = rune(rr) + i += 4 + if utf16.IsSurrogate(r) { + if len(cs) >= i+6 && cs[i+2] == 'u' && cs[i+1] == '\\' { + i += 2 + // c = cs[i+4] // may help reduce bounds-checking + var rr1 uint32 + for j := 1; j < 5; j++ { + c = jsonU4Set[cs[i+j]] + if c == jsonU4SetErrVal { + // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + rr1 = rr1*16 + uint32(c) + } + r = utf16.DecodeRune(r, rune(rr1)) + i += 4 + } else { + r = unicode.ReplacementChar + goto encode_rune + } + } + encode_rune: + w2 := utf8.EncodeRune(d.bstr[:], r) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("json: unsupported escaped value: %c", c) + } + i++ + cursor = i + } + d.bs = v +} + +func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { + if d.h.PreferFloat || jsonIsFloatBytesB3(bs) { // bytes.IndexByte(bs, '.') != -1 ||... + // } else if d.h.PreferFloat || bytes.ContainsAny(bs, ".eE") { + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + } else if d.h.SignedInteger || bs[0] == '-' { + z.v = valueTypeInt + z.i, err = strconv.ParseInt(stringView(bs), 10, 64) + } else { + z.v = valueTypeUint + z.u, err = strconv.ParseUint(stringView(bs), 10, 64) + } + if err != nil && z.v != valueTypeFloat { + if v, ok := err.(*strconv.NumError); ok && (v.Err == strconv.ErrRange || v.Err == strconv.ErrSyntax) { + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + } + } + return +} + +func (d *jsonDecDriver) bsToString() string { + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if jsonAlwaysReturnInternString || d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) +} + +func (d *jsonDecDriver) DecodeNaked() { + z := d.d.n + // var decodeFurther bool + + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + switch d.tok { + case 'n': + d.readLit(3, jsonLitNull+1) // ull + z.v = valueTypeNil + case 'f': + d.readLit(4, jsonLitFalse+1) // alse + z.v = valueTypeBool + z.b = false + case 't': + d.readLit(3, jsonLitTrue+1) // rue + z.v = valueTypeBool + z.b = true + case '{': + z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart + case '[': + z.v = valueTypeArray // don't consume. 
kInterfaceNaked will call ReadArrayStart + case '"': + // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first + d.appendStringAsBytes() + if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString { + switch stringView(d.bs) { + case "null": + z.v = valueTypeNil + case "true": + z.v = valueTypeBool + z.b = true + case "false": + z.v = valueTypeBool + z.b = false + default: + // check if a number: float, int or uint + if err := d.nakedNum(z, d.bs); err != nil { + z.v = valueTypeString + z.s = d.bsToString() + } + } + } else { + z.v = valueTypeString + z.s = d.bsToString() + } + default: // number + bs := d.decNumBytes() + if len(bs) == 0 { + d.d.errorf("json: decode number from empty string") + return + } + if err := d.nakedNum(z, bs); err != nil { + d.d.errorf("json: decode number from %s: %v", bs, err) + return + } + } + // if decodeFurther { + // d.s.sc.retryRead() + // } + return +} + +//---------------------- + +// JsonHandle is a handle for JSON encoding format. +// +// Json is comprehensively supported: +// - decodes numbers into interface{} as int, uint or float64 +// - configurable way to encode/decode []byte . +// by default, encodes and decodes []byte using base64 Std Encoding +// - UTF-8 support for encoding and decoding +// +// It has better performance than the json library in the standard library, +// by leveraging the performance improvements of the codec library and +// minimizing allocations. +// +// In addition, it doesn't read more bytes than necessary during a decode, which allows +// reading multiple values from a stream containing json and non-json content. +// For example, a user can read a json value, then a cbor value, then a msgpack value, +// all from the same stream in sequence. +// +// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs +// are not treated as an error. +// Instead, they are replaced by the Unicode replacement character U+FFFD. +type JsonHandle struct { + textEncodingType + BasicHandle + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. + // - else encode all integers as a json number (default) + IntegerAsString uint8 + + // HTMLCharsAsIs controls how to encode some special characters to html: < > & + // + // By default, we encode them as \uXXX + // to prevent security holes when served from some browsers. + HTMLCharsAsIs bool + + // PreferFloat says that we will default to decoding a number as a float. + // If not set, we will examine the characters of the number and decode as an + // integer type if it doesn't have any of the characters [.eE]. 
+ PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool +} + +func (h *JsonHandle) hasElemSeparators() bool { return true } + +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{i: ext}) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { + hd := jsonEncDriver{e: e, h: h} + hd.bs = hd.b[:0] + + hd.reset() + + return &hd +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.w = e.e.w + e.se.i = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.d, e.dt, e.dl, e.ds = false, false, 0, "" + e.c = 0 + if e.h.Indent > 0 { + e.d = true + e.ds = jsonSpaces[:e.h.Indent] + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.ds = jsonTabs[:-(e.h.Indent)] + } +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r + d.se.i = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +// func jsonIsFloatBytes(bs []byte) bool { +// for _, v := range bs { +// // if v == '.' || v == 'e' || v == 'E' { +// if jsonIsFloatSet.isset(v) { +// return true +// } +// } +// return false +// } + +func jsonIsFloatBytesB2(bs []byte) bool { + return bytes.IndexByte(bs, '.') != -1 || + bytes.IndexByte(bs, 'E') != -1 +} + +func jsonIsFloatBytesB3(bs []byte) bool { + return bytes.IndexByte(bs, '.') != -1 || + bytes.IndexByte(bs, 'E') != -1 || + bytes.IndexByte(bs, 'e') != -1 +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl new file mode 100644 index 00000000000..9a46f138143 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl @@ -0,0 +1,100 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from mammoth-test.go.tmpl +// ************************************************************ + +package codec + +import "testing" +import "fmt" + +// TestMammoth has all the different paths optimized in fast-path +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. 
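For orientation, each entry in `.Values` expands into such a field pair on `TestMammoth`. Assuming `MethodNamePfx` concatenates the prefix with the capitalized type name (an assumption based on the template's usage, not verified generator output), a primitive like `uint64` renders roughly as:

```go
// illustrative expansion for .Primitive = uint64 (field names are assumptions)
FUint64    uint64
FptrUint64 *uint64
```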
+ +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "type" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "type" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +func doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} + for _, v := range [][]{{ .Elem }}{ nil, []{{ .Elem }}{}, []{{ .Elem }}{ {{ nonzerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2, v{{$i}}v3, v{{$i}}v4 []{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}}, _ := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") + bs{{$i}}, _ = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") + // ... 
+ v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "type" false }}(v{{$i}}v1) + bs{{$i}}, _ = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2) + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") + v{{$i}}v2 = nil + bs{{$i}}, _ = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") + } +{{end}}{{end}}{{end}} +} + +func doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, map[{{ .MapKey }}]{{ .Elem }}{}, map[{{ .MapKey }}]{{ .Elem }}{ {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} } } { + // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}}, _ := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v != nil { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") + bs{{$i}}, _ = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p") + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + doTestMammothSlices(t, h) + doTestMammothMaps(t, h) +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go new file mode 100644 index 00000000000..fcc3177aaab --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -0,0 +1,899 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w encWriter + h *MsgpackHandle + x [8]byte +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + if i >= 0 { + e.EncodeUint(uint64(i)) + } else if i >= -32 { + e.w.writen1(byte(i)) + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + e.w.writen1(byte(i)) + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f 
float32) { + e.w.writen1(mpFloat) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytes(c_RAW, bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerStr, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + slen := len(bs) + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerStr, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.hasFixMin && l < ct.fixCutoff { + e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + d *Decoder + r decReader // *Decoder decReader decReaderT + h *MsgpackHandle + b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. 
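The size-based integer encoding in `EncodeInt`/`EncodeUint` above is easiest to see at the byte level. A hedged sketch using the exported API (the output annotations follow the write paths shown above):

```go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func show(v interface{}) {
	var b []byte
	var mh codec.MsgpackHandle
	codec.NewEncoderBytes(&b, &mh).MustEncode(v)
	fmt.Printf("% x\n", b)
}

func main() {
	show(100)  // 64       - positive fixnum, a single byte
	show(-5)   // fb       - negative fixnum, a single byte
	show(200)  // cc c8    - mpUint8 prefix + value
	show(-200) // d1 ff 38 - mpInt16 prefix + big-endian value
}
```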
+func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.n + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(int64(bigen.Uint64(d.r.readx(8)))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(clen) + default: + d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); 
i != trunc { + d.d.errorf("Overflow int value: %v", i) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("Assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + return + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + d.d.errorf("Overflow uint value: %v", ui) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt(0)) + } + if chkOverflow32 && chkOvf.Float32(f) { + d.d.errorf("msgpack: float32 overflow: %v", f) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + + // DecodeBytes could be from: bin str fixstr fixarray array ... 
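+ // (per the msgpack spec: bin8/str8 carry a 1-byte length, bin16/str16 a 2-byte length,
+ // bin32/str32 a 4-byte length, all big-endian; fixstr packs the length into the low
+ // 5 bits of the type byte - readContainerLen below handles all of these)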
+ var clen int + vt := d.ContainerType() + switch vt { + case valueTypeBytes: + // valueTypeBytes may be a mpBin or an mpStr container + if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + case valueTypeString: + clen = d.readContainerLen(msgpackContainerStr) + case valueTypeArray: + clen = d.readContainerLen(msgpackContainerList) + // ensure everything after is one byte each + for i := 0; i < clen; i++ { + d.readNextBd() + if d.bd == mpNil { + bs = append(bs, 0) + } else if d.bd == mpUint8 { + bs = append(bs, d.r.readn1()) + } else { + d.d.errorf("cannot read non-byte into a byte array") + return + } + } + d.bdRead = false + return bs + default: + d.d.errorf("invalid container type: expecting bin|str|array") + return + } + + // these are (bin|str)(8|16|32) + // println("DecodeBytes: clen: ", clen) + d.bdRead = false + // bytes may be nil, so handle it. if nil, clen=-1. + if clen < 0 { + return nil + } + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || + (!d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { + return valueTypeBytes + } else if d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { + return valueTypeString + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + v = true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = 
-1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool + binaryEncodingType + noElemSeparators +} + +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &setExtWrapper{b: ext}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. 
+ // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.isClosed() { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go new file mode 100644 index 00000000000..3aa06fc7bb6 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/rpc.go @@ -0,0 +1,172 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
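+
+// A client-side usage sketch (illustrative only; not part of the upstream
+// sources). MsgpackSpecRpc, defined in msgpack.go above, plugs directly into
+// the standard net/rpc machinery; the address, service name and arguments
+// below are assumptions for the example.
+//
+//	conn, err := net.Dial("tcp", "127.0.0.1:9000")
+//	if err != nil {
+//		// handle the dial error
+//	}
+//	h := new(MsgpackHandle)
+//	client := rpc.NewClientWithCodec(MsgpackSpecRpc.ClientCodec(conn, h))
+//	var reply string
+//	err = client.Call("TestRpcInt.Echo123",
+//		MsgpackSpecRpcMultiArgs{"A1", "B2", "C3"}, &reply)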
+ +package codec + +import ( + "bufio" + "errors" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. It accommodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. +type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer +} + +// ------------------------------------- + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + rwc io.ReadWriteCloser + dec *Decoder + enc *Encoder + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + h Handle + + cls bool + clsmu sync.RWMutex +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) + + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true")) + } + + return rpcCodec{ + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), + h: h, + } +} + +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.isClosed() { + return io.EOF + } + if err = c.enc.Encode(obj1); err != nil { + return + } + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return + } + } + if doFlush { + return c.bw.Flush() + } + return +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.isClosed() { + return io.EOF + } + //If nil is passed in, we should still attempt to read content to nowhere. + if obj == nil { + var obj2 interface{} + return c.dec.Decode(&obj2) + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) isClosed() bool { + c.clsmu.RLock() + x := c.cls + c.clsmu.RUnlock() + return x +} + +func (c *rpcCodec) Close() error { + if c.isClosed() { + return io.EOF + } + c.clsmu.Lock() + c.cls = true + c.clsmu.Unlock() + return c.rwc.Close() +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. 
+// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go new file mode 100644 index 00000000000..b69a15e75a3 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -0,0 +1,541 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" +) + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + h *SimpleHandle + w encWriter + b [8]byte +} + +func (e *simpleEncDriver) EncodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) EncodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) EncodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) EncodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + if v <= math.MaxUint8 { + e.w.writen2(bd, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 1) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:8], e.w}.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + if length == 0 { + e.w.writen1(bd) + } else if length <= math.MaxUint8 { + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + } else if length <= math.MaxUint16 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) + } else if int64(length) <= math.MaxUint32 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) + } else { + e.w.writen1(bd + 4) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e 
*simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) WriteArrayStart(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) WriteMapStart(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) EncodeSymbol(v string) { + e.EncodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + d *Decoder + h *SimpleHandle + r decReader + bdRead bool + bd byte + br bool // bytes reader + b [scratchByteArrayLen]byte + noBuiltInTypes + // noStreamingCodec + decDriverNoopContainerReader +} + +func (d *simpleDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *simpleDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + return valueTypeNil + } else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 || + d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 { + return valueTypeBytes + } else if d.bd == simpleVdString || d.bd == simpleVdString+1 || + d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 { + return valueTypeString + } else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 || + d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 { + return valueTypeArray + } else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 || + d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 { + return valueTypeMap + } else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + } + return valueTypeUnset +} + +func (d *simpleDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + case simpleVdPosInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case simpleVdPosInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case simpleVdPosInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + neg = true + case simpleVdNegInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + neg = true + case simpleVdNegInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + neg = true + case simpleVdNegInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + neg = true + default: + d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + return + } + // don't do this check, because callers may only want the unsigned value. 
+	// if ui > math.MaxInt64 {
+	// 	d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
+	// 	return
+	// }
+	return
+}
+
+func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
+	ui, neg := d.decCheckInteger()
+	i, overflow := chkOvf.SignedInt(ui)
+	if overflow {
+		d.d.errorf("simple: overflow converting %v to signed integer", ui)
+		return
+	}
+	if neg {
+		i = -i
+	}
+	if chkOvf.Int(i, bitsize) {
+		d.d.errorf("simple: overflow integer: %v", i)
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+	ui, neg := d.decCheckInteger()
+	if neg {
+		d.d.errorf("Assigning negative signed value to unsigned type")
+		return
+	}
+	if chkOvf.Uint(ui, bitsize) {
+		d.d.errorf("simple: overflow integer: %v", ui)
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdFloat32 {
+		f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
+	} else if d.bd == simpleVdFloat64 {
+		f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
+	} else {
+		if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
+			f = float64(d.DecodeInt(64))
+		} else {
+			d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
+			return
+		}
+	}
+	if chkOverflow32 && chkOvf.Float32(f) {
+		d.d.errorf("simple: float32 overflow: %v", f)
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *simpleDecDriver) DecodeBool() (b bool) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	if d.bd == simpleVdTrue {
+		b = true
+	} else if d.bd == simpleVdFalse {
+	} else {
+		d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
+		return
+	}
+	d.bdRead = false
+	return
+}
+
+func (d *simpleDecDriver) ReadMapStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	d.bdRead = false
+	return d.decLen()
+}
+
+func (d *simpleDecDriver) ReadArrayStart() (length int) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	d.bdRead = false
+	return d.decLen()
+}
+
+func (d *simpleDecDriver) decLen() int {
+	switch d.bd % 8 {
+	case 0:
+		return 0
+	case 1:
+		return int(d.r.readn1())
+	case 2:
+		return int(bigen.Uint16(d.r.readx(2)))
+	case 3:
+		ui := uint64(bigen.Uint32(d.r.readx(4)))
+		if chkOvf.Uint(ui, intBitsize) {
+			d.d.errorf("simple: overflow integer: %v", ui)
+			return 0
+		}
+		return int(ui)
+	case 4:
+		ui := bigen.Uint64(d.r.readx(8))
+		if chkOvf.Uint(ui, intBitsize) {
+			d.d.errorf("simple: overflow integer: %v", ui)
+			return 0
+		}
+		return int(ui)
+	}
+	d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) +} + +func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, true) + default: + d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt(64) + } else { + n.v = valueTypeUint + n.u = d.DecodeUint(64) + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt(64) + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat(true) + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle 
for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+//   - Encoding of a value is always preceded by the descriptor byte (bd)
+//   - True, false, nil are encoded fully in 1 byte (the descriptor)
+//   - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+//     There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+//   - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+//   - Lengths of containers (strings, bytes, array, map, extensions)
+//     are encoded in 0, 1, 2, 4 or 8 bytes.
+//     Zero-length containers have no length encoded.
+//     For others, the number of length bytes is given by pow(2, (bd%8)-1).
+//   - maps are encoded as [bd] [length] [[key][value]]...
+//   - arrays are encoded as [bd] [length] [value]...
+//   - extensions are encoded as [bd] [length] [tag] [byte]...
+//   - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
+type SimpleHandle struct {
+	BasicHandle
+	binaryEncodingType
+	noElemSeparators
+}
+
+func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
+	return h.SetExt(rt, tag, &setExtWrapper{b: ext})
+}
+
+func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
+	return &simpleEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
+	return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes}
+}
+
+func (e *simpleEncDriver) reset() {
+	e.w = e.e.w
+}
+
+func (d *simpleDecDriver) reset() {
+	d.r, d.br = d.d.r, d.d.bytes
+	d.bd, d.bdRead = 0, false
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
new file mode 100644
index 00000000000..9028586711e
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
@@ -0,0 +1,639 @@
+[
+  {
+    "cbor": "AA==",
+    "hex": "00",
+    "roundtrip": true,
+    "decoded": 0
+  },
+  {
+    "cbor": "AQ==",
+    "hex": "01",
+    "roundtrip": true,
+    "decoded": 1
+  },
+  {
+    "cbor": "Cg==",
+    "hex": "0a",
+    "roundtrip": true,
+    "decoded": 10
+  },
+  {
+    "cbor": "Fw==",
+    "hex": "17",
+    "roundtrip": true,
+    "decoded": 23
+  },
+  {
+    "cbor": "GBg=",
+    "hex": "1818",
+    "roundtrip": true,
+    "decoded": 24
+  },
+  {
+    "cbor": "GBk=",
+    "hex": "1819",
+    "roundtrip": true,
+    "decoded": 25
+  },
+  {
+    "cbor": "GGQ=",
+    "hex": "1864",
+    "roundtrip": true,
+    "decoded": 100
+  },
+  {
+    "cbor": "GQPo",
+    "hex": "1903e8",
+    "roundtrip": true,
+    "decoded": 1000
+  },
+  {
+    "cbor": "GgAPQkA=",
+    "hex": "1a000f4240",
+    "roundtrip": true,
+    "decoded": 1000000
+  },
+  {
+    "cbor": "GwAAAOjUpRAA",
+    "hex": "1b000000e8d4a51000",
+    "roundtrip": true,
+    "decoded": 1000000000000
+  },
+  {
+    "cbor": "G///////////",
+    "hex": "1bffffffffffffffff",
+    "roundtrip": true,
+    "decoded": 18446744073709551615
+  },
+  {
+    "cbor": "wkkBAAAAAAAAAAA=",
+    "hex": "c249010000000000000000",
+    "roundtrip": true,
+    "decoded": 18446744073709551616
+  },
+  {
+    "cbor": "O///////////",
+    "hex": "3bffffffffffffffff",
+    "roundtrip": true,
+    "decoded": -18446744073709551616,
+    "skip": true
+  },
+  {
+    "cbor": "w0kBAAAAAAAAAAA=",
+    "hex": "c349010000000000000000",
+    "roundtrip": true,
+    "decoded": -18446744073709551617
+  },
+  {
+    "cbor": "IA==",
+    "hex": "20",
+    "roundtrip": true,
+    "decoded": -1
+  },
+  {
+    "cbor": "KQ==",
+    "hex": "29",
+    "roundtrip": true,
+    "decoded": -10
+  },
+  {
+    "cbor": "OGM=",
+    "hex": 
"3863", + "roundtrip": true, + "decoded": -100 + }, + { + "cbor": "OQPn", + "hex": "3903e7", + "roundtrip": true, + "decoded": -1000 + }, + { + "cbor": "+QAA", + "hex": "f90000", + "roundtrip": true, + "decoded": 0.0 + }, + { + "cbor": "+YAA", + "hex": "f98000", + "roundtrip": true, + "decoded": -0.0 + }, + { + "cbor": "+TwA", + "hex": "f93c00", + "roundtrip": true, + "decoded": 1.0 + }, + { + "cbor": "+z/xmZmZmZma", + "hex": "fb3ff199999999999a", + "roundtrip": true, + "decoded": 1.1 + }, + { + "cbor": "+T4A", + "hex": "f93e00", + "roundtrip": true, + "decoded": 1.5 + }, + { + "cbor": "+Xv/", + "hex": "f97bff", + "roundtrip": true, + "decoded": 65504.0 + }, + { + "cbor": "+kfDUAA=", + "hex": "fa47c35000", + "roundtrip": true, + "decoded": 100000.0 + }, + { + "cbor": "+n9///8=", + "hex": "fa7f7fffff", + "roundtrip": true, + "decoded": 3.4028234663852886e+38 + }, + { + "cbor": "+3435DyIAHWc", + "hex": "fb7e37e43c8800759c", + "roundtrip": true, + "decoded": 1.0e+300 + }, + { + "cbor": "+QAB", + "hex": "f90001", + "roundtrip": true, + "decoded": 5.960464477539063e-08 + }, + { + "cbor": "+QQA", + "hex": "f90400", + "roundtrip": true, + "decoded": 6.103515625e-05 + }, + { + "cbor": "+cQA", + "hex": "f9c400", + "roundtrip": true, + "decoded": -4.0 + }, + { + "cbor": "+8AQZmZmZmZm", + "hex": "fbc010666666666666", + "roundtrip": true, + "decoded": -4.1 + }, + { + "cbor": "+XwA", + "hex": "f97c00", + "roundtrip": true, + "diagnostic": "Infinity" + }, + { + "cbor": "+X4A", + "hex": "f97e00", + "roundtrip": true, + "diagnostic": "NaN" + }, + { + "cbor": "+fwA", + "hex": "f9fc00", + "roundtrip": true, + "diagnostic": "-Infinity" + }, + { + "cbor": "+n+AAAA=", + "hex": "fa7f800000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+n/AAAA=", + "hex": "fa7fc00000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+v+AAAA=", + "hex": "faff800000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "+3/wAAAAAAAA", + "hex": "fb7ff0000000000000", + "roundtrip": false, + "diagnostic": "Infinity" + }, + { + "cbor": "+3/4AAAAAAAA", + "hex": "fb7ff8000000000000", + "roundtrip": false, + "diagnostic": "NaN" + }, + { + "cbor": "+//wAAAAAAAA", + "hex": "fbfff0000000000000", + "roundtrip": false, + "diagnostic": "-Infinity" + }, + { + "cbor": "9A==", + "hex": "f4", + "roundtrip": true, + "decoded": false + }, + { + "cbor": "9Q==", + "hex": "f5", + "roundtrip": true, + "decoded": true + }, + { + "cbor": "9g==", + "hex": "f6", + "roundtrip": true, + "decoded": null + }, + { + "cbor": "9w==", + "hex": "f7", + "roundtrip": true, + "diagnostic": "undefined" + }, + { + "cbor": "8A==", + "hex": "f0", + "roundtrip": true, + "diagnostic": "simple(16)" + }, + { + "cbor": "+Bg=", + "hex": "f818", + "roundtrip": true, + "diagnostic": "simple(24)" + }, + { + "cbor": "+P8=", + "hex": "f8ff", + "roundtrip": true, + "diagnostic": "simple(255)" + }, + { + "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==", + "hex": "c074323031332d30332d32315432303a30343a30305a", + "roundtrip": true, + "diagnostic": "0(\"2013-03-21T20:04:00Z\")" + }, + { + "cbor": "wRpRS2ew", + "hex": "c11a514b67b0", + "roundtrip": true, + "diagnostic": "1(1363896240)" + }, + { + "cbor": "wftB1FLZ7CAAAA==", + "hex": "c1fb41d452d9ec200000", + "roundtrip": true, + "diagnostic": "1(1363896240.5)" + }, + { + "cbor": "10QBAgME", + "hex": "d74401020304", + "roundtrip": true, + "diagnostic": "23(h'01020304')" + }, + { + "cbor": "2BhFZElFVEY=", + "hex": "d818456449455446", + "roundtrip": true, + "diagnostic": "24(h'6449455446')" 
+ }, + { + "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==", + "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d", + "roundtrip": true, + "diagnostic": "32(\"http://www.example.com\")" + }, + { + "cbor": "QA==", + "hex": "40", + "roundtrip": true, + "diagnostic": "h''" + }, + { + "cbor": "RAECAwQ=", + "hex": "4401020304", + "roundtrip": true, + "diagnostic": "h'01020304'" + }, + { + "cbor": "YA==", + "hex": "60", + "roundtrip": true, + "decoded": "" + }, + { + "cbor": "YWE=", + "hex": "6161", + "roundtrip": true, + "decoded": "a" + }, + { + "cbor": "ZElFVEY=", + "hex": "6449455446", + "roundtrip": true, + "decoded": "IETF" + }, + { + "cbor": "YiJc", + "hex": "62225c", + "roundtrip": true, + "decoded": "\"\\" + }, + { + "cbor": "YsO8", + "hex": "62c3bc", + "roundtrip": true, + "decoded": "ü" + }, + { + "cbor": "Y+awtA==", + "hex": "63e6b0b4", + "roundtrip": true, + "decoded": "水" + }, + { + "cbor": "ZPCQhZE=", + "hex": "64f0908591", + "roundtrip": true, + "decoded": "𐅑" + }, + { + "cbor": "gA==", + "hex": "80", + "roundtrip": true, + "decoded": [ + + ] + }, + { + "cbor": "gwECAw==", + "hex": "83010203", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3 + ] + }, + { + "cbor": "gwGCAgOCBAU=", + "hex": "8301820203820405", + "roundtrip": true, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=", + "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819", + "roundtrip": true, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "oA==", + "hex": "a0", + "roundtrip": true, + "decoded": { + } + }, + { + "cbor": "ogECAwQ=", + "hex": "a201020304", + "roundtrip": true, + "skip": true, + "diagnostic": "{1: 2, 3: 4}" + }, + { + "cbor": "omFhAWFiggID", + "hex": "a26161016162820203", + "roundtrip": true, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhoWFiYWM=", + "hex": "826161a161626163", + "roundtrip": true, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF", + "hex": "a56161614161626142616361436164614461656145", + "roundtrip": true, + "decoded": { + "a": "A", + "b": "B", + "c": "C", + "d": "D", + "e": "E" + } + }, + { + "cbor": "X0IBAkMDBAX/", + "hex": "5f42010243030405ff", + "roundtrip": false, + "skip": true, + "diagnostic": "(_ h'0102', h'030405')" + }, + { + "cbor": "f2VzdHJlYWRtaW5n/w==", + "hex": "7f657374726561646d696e67ff", + "roundtrip": false, + "decoded": "streaming" + }, + { + "cbor": "n/8=", + "hex": "9fff", + "roundtrip": false, + "decoded": [ + + ] + }, + { + "cbor": "nwGCAgOfBAX//w==", + "hex": "9f018202039f0405ffff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwGCAgOCBAX/", + "hex": "9f01820203820405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGCAgOfBAX/", + "hex": "83018202039f0405ff", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "gwGfAgP/ggQF", + "hex": "83019f0203ff820405", + "roundtrip": false, + "decoded": [ + 1, + [ + 2, + 3 + ], + [ + 4, + 5 + ] + ] + }, + { + "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=", + "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff", + "roundtrip": false, + "decoded": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 
21, + 22, + 23, + 24, + 25 + ] + }, + { + "cbor": "v2FhAWFinwID//8=", + "hex": "bf61610161629f0203ffff", + "roundtrip": false, + "decoded": { + "a": 1, + "b": [ + 2, + 3 + ] + } + }, + { + "cbor": "gmFhv2FiYWP/", + "hex": "826161bf61626163ff", + "roundtrip": false, + "decoded": [ + "a", + { + "b": "c" + } + ] + }, + { + "cbor": "v2NGdW71Y0FtdCH/", + "hex": "bf6346756ef563416d7421ff", + "roundtrip": false, + "decoded": { + "Fun": true, + "Amt": -2 + } + } +] diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py new file mode 100755 index 00000000000..800376f6841 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/test.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +# This will create golden files in a directory passed to it. +# A Test calls this internally to create the golden files +# So it can process them (so we don't have to checkin the files). + +# Ensure msgpack-python and cbor are installed first, using: +# sudo apt-get install python-dev +# sudo apt-get install python-pip +# pip install --user msgpack-python msgpack-rpc-python cbor + +# Ensure all "string" keys are utf strings (else encoded as bytes) + +import cbor, msgpack, msgpackrpc, sys, os, threading + +def get_test_data_list(): + # get list with all primitive types, and a combo type + l0 = [ + -8, + -1616, + -32323232, + -6464646464646464, + 192, + 1616, + 32323232, + 6464646464646464, + 192, + -3232.0, + -6464646464.0, + 3232.0, + 6464.0, + 6464646464.0, + False, + True, + u"null", + None, + u"some&day>some 0 + if stopTimeSec > 0: + def myStopRpcServer(): + server.stop() + t = threading.Timer(stopTimeSec, myStopRpcServer) + t.start() + server.start() + +def doRpcClientToPythonSvc(port): + address = msgpackrpc.Address('127.0.0.1', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("Echo123", "A1", "B2", "C3") + print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doRpcClientToGoSvc(port): + # print ">>>> port: ", port, " <<<<<" + address = msgpackrpc.Address('127.0.0.1', port) + client = msgpackrpc.Client(address, unpack_encoding='utf-8') + print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) + print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) + +def doMain(args): + if len(args) == 2 and args[0] == "testdata": + build_test_data(args[1]) + elif len(args) == 3 and args[0] == "rpc-server": + doRpcServer(int(args[1]), int(args[2])) + elif len(args) == 2 and args[0] == "rpc-client-python-service": + doRpcClientToPythonSvc(int(args[1])) + elif len(args) == 2 and args[0] == "rpc-client-go-service": + doRpcClientToGoSvc(int(args[1])) + else: + print("Usage: test.py " + + "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") + +if __name__ == "__main__": + doMain(sys.argv[1:]) + diff --git a/vendor/github.com/ugorji/go/codec/time.go b/vendor/github.com/ugorji/go/codec/time.go new file mode 100644 index 00000000000..55841d4cac6 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/time.go @@ -0,0 +1,220 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
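+
+// A round-trip sketch (illustrative only; not part of the upstream sources):
+// encodeTime and decodeTime below are inverses of each other, up to
+// time-zone naming. The sample instant is an arbitrary assumption.
+//
+//	t := time.Date(2013, 3, 21, 20, 4, 0, 0, time.UTC)
+//	bs := encodeTime(t)
+//	t2, err := decodeTime(bs)
+//	// here err == nil and t2.Equal(t) holds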
+
+package codec
+
+import (
+	"fmt"
+	"time"
+)
+
+var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+
+type timeExt struct{}
+
+func (x timeExt) WriteExt(v interface{}) (bs []byte) {
+	switch v2 := v.(type) {
+	case time.Time:
+		bs = encodeTime(v2)
+	case *time.Time:
+		bs = encodeTime(*v2)
+	default:
+		panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
+	}
+	return
+}
+func (x timeExt) ReadExt(v interface{}, bs []byte) {
+	tt, err := decodeTime(bs)
+	if err != nil {
+		panic(err)
+	}
+	*(v.(*time.Time)) = tt
+}
+
+func (x timeExt) ConvertExt(v interface{}) interface{} {
+	return x.WriteExt(v)
+}
+func (x timeExt) UpdateExt(v interface{}, src interface{}) {
+	x.ReadExt(v, src.([]byte))
+}
+
+// encodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+//   A timestamp is composed of 3 components:
+//
+//     - secs: signed integer representing seconds since unix epoch
+//     - nsecs: unsigned integer representing fractional seconds as a
+//       nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+//     - tz: signed integer representing timezone offset in minutes east of UTC,
+//       and a dst (daylight saving time) flag
+//
+//   When encoding a timestamp, the first byte is the descriptor, which
+//   defines which components are encoded and how many bytes are used to
+//   encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+//   is not encoded in the byte array explicitly*.
+//
+//   Descriptor 8 bits are of the form `A B C DDD EE`:
+//     A:   Is secs component encoded? 1 = true
+//     B:   Is nsecs component encoded? 1 = true
+//     C:   Is tz component encoded? 1 = true
+//     DDD: Number of extra bytes for secs (range 0-7).
+//          If A = 1, secs is encoded in DDD+1 bytes.
+//          If A = 0, secs is not encoded, and is assumed to be 0.
+//          If A = 1, then we need at least 1 byte to encode secs.
+//          DDD says the number of extra bytes beyond that 1.
+//          E.g. if DDD=0, then secs is represented in 1 byte.
+//               if DDD=2, then secs is represented in 3 bytes.
+//     EE:  Number of extra bytes for nsecs (range 0-3).
+//          If B = 1, nsecs is encoded in EE+1 bytes (similar to secs/DDD above)
+//
+//   Following the descriptor byte, subsequent bytes are:
+//
+//     secs component encoded in `DDD + 1` bytes (if A == 1)
+//     nsecs component encoded in `EE + 1` bytes (if B == 1)
+//     tz component encoded in 2 bytes (if C == 1)
+//
+//   secs and nsecs components are integers encoded in a big-endian
+//   two's-complement encoding format.
+//
+//   tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+//   least significant bit 0 are described below:
+//
+//     Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+//     Bit 15 = have_dst: set to 1 if we set the dst flag.
+//     Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+//     Bits 13..0 = timezone offset in minutes. It is a signed integer in
+//     big-endian format.
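+//
+//   Worked example (derived from the rules above): a descriptor byte of
+//   0xc1 (bits 1 1 0 000 01) means secs is present and encoded in 0+1 = 1
+//   byte, nsecs is present and encoded in 1+1 = 2 bytes, and no tz
+//   component follows, so the whole timestamp occupies 1+1+2 = 4 bytes.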
+// +func encodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// DecodeTime decodes a []byte into a time.Time. +func decodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + //tzname[3] = '-' (TODO: verify. this works here) + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +// func timeLocUTCName(tzint int16) string { +// if tzint == 0 { +// return "UTC" +// } +// var tzname = []byte("UTC+00:00") +// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. +// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first +// var tzhr, tzmin int16 +// if tzint < 0 { +// tzname[3] = '-' // (TODO: verify. 
this works here) +// tzhr, tzmin = -tzint/60, (-tzint)%60 +// } else { +// tzhr, tzmin = tzint/60, tzint%60 +// } +// tzname[4] = timeDigits[tzhr/10] +// tzname[5] = timeDigits[tzhr%10] +// tzname[7] = timeDigits[tzmin/10] +// tzname[8] = timeDigits[tzmin%10] +// return string(tzname) +// //return time.FixedZone(string(tzname), int(tzint)*60) +// } diff --git a/vendor/github.com/ugorji/go/codec/z.go b/vendor/github.com/ugorji/go/codec/z.go new file mode 100644 index 00000000000..b6ac0769a05 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/z.go @@ -0,0 +1,23 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import "sort" + +// TODO: this is brittle, as it depends on z.go's init() being called last. +// The current build tools all honor that files are passed in lexical order. +// However, we should consider using an init_channel, +// that each person doing init will write to. + +func init() { + if !useLookupRecognizedTypes { + return + } + sort.Sort(uintptrSlice(recognizedRtids)) + sort.Sort(uintptrSlice(recognizedRtidPtrs)) + recognizedRtidOrPtrs = make([]uintptr, len(recognizedRtids)+len(recognizedRtidPtrs)) + copy(recognizedRtidOrPtrs, recognizedRtids) + copy(recognizedRtidOrPtrs[len(recognizedRtids):], recognizedRtidPtrs) + sort.Sort(uintptrSlice(recognizedRtidOrPtrs)) +} diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 00000000000..58ebdc162f5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 00000000000..969ae7a00b3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,71 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. 
The package is completely written in Go and doesn't
+have any dependency on any C code.
+
+The package is currently under development. There may still be bugs, and
+the APIs are not yet considered stable. At this time the package cannot
+compete with the xz tool regarding compression speed and size. The
+algorithms there have been developed over a long time and are highly
+optimized. However, a number of improvements are planned, and I'm very
+optimistic about parallel compression and decompression. Stay tuned!
+
+# Using the API
+
+The following example program shows how to use the API.
+
+    package main
+
+    import (
+        "bytes"
+        "io"
+        "log"
+        "os"
+
+        "github.com/ulikunitz/xz"
+    )
+
+    func main() {
+        const text = "The quick brown fox jumps over the lazy dog.\n"
+        var buf bytes.Buffer
+        // compress text
+        w, err := xz.NewWriter(&buf)
+        if err != nil {
+            log.Fatalf("xz.NewWriter error %s", err)
+        }
+        if _, err := io.WriteString(w, text); err != nil {
+            log.Fatalf("WriteString error %s", err)
+        }
+        if err := w.Close(); err != nil {
+            log.Fatalf("w.Close error %s", err)
+        }
+        // decompress buffer and write output to stdout
+        r, err := xz.NewReader(&buf)
+        if err != nil {
+            log.Fatalf("NewReader error %s", err)
+        }
+        if _, err = io.Copy(os.Stdout, r); err != nil {
+            log.Fatalf("io.Copy error %s", err)
+        }
+    }
+
+# Using the gxz compression tool
+
+The package includes a gxz command line utility for compression and
+decompression.
+
+Use the following command for installation:
+
+    $ go get github.com/ulikunitz/xz/cmd/gxz
+
+To test it, call the following command.
+
+    $ gxz bigfile
+
+After some time a much smaller file bigfile.xz will replace bigfile.
+To decompress it, use the following command.
+
+    $ gxz -d bigfile.xz
+
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 00000000000..7b34c0caa79
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,315 @@
+# TODO list
+
+## Release v0.6
+
+1. Review encoder and check for lzma improvements under xz.
+2. Fix binary tree matcher.
+3. Compare compression ratio with xz tool using comparable parameters
+   and optimize parameters
+4. Do some optimizations
+   - rename operation action and make it a simple type of size 8
+   - make maxMatches, wordSize parameters
+   - stop searching after a certain length is found (parameter sweetLen)
+
+## Release v0.7
+
+1. Optimize code
+2. Do statistical analysis to get linear presets.
+3. Test sync.Pool compatibility for xz and lzma Writer and Reader
+4. Fuzz optimized code.
+
+## Release v0.8
+
+1. Support parallel goroutines for writing and reading xz files.
+2. Support a ReaderAt interface for xz files with small block sizes.
+3. Improve compatibility between gxz and xz
+4. Provide manual page for gxz
+
+## Release v0.9
+
+1. Improve documentation
+2. Fuzz again
+
+## Release v1.0
+
+1. Fully functioning gxz
+2. Add godoc URL to README.md (godoc.org)
+3. Resolve all issues.
+4. Define release candidates.
+5. Public announcement.
+
+## Package lzma
+
+### Release v0.6
+
+- Rewrite Encoder into a simple greedy one-op-at-a-time encoder,
+  including
+  + simple scan at the dictionary head for the same byte
+  + use the killer byte (requiring matches to get longer, the first
+    test should be the byte that would make the match longer)
+
+
+## Optimizations
+
+- There may be a lot of false sharing in lzma.State; check whether this
+  can be improved by reorganizing its internal structure.
+- Check whether batching encoding and decoding improves speed.
+
+### DAG optimizations
+
+- Use full buffer to create minimal bit-length above range encoder.
+- Might be too slow (see v0.4)
+
+### Different match finders
+
+- hashes with 2, 3 characters in addition to 4 characters
+- binary trees with 2-7 characters (uint64 as key, use uint32 as
+  pointers into an array)
+- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+  into an array with bit-stealing for the colors)
+
+## Release Procedure
+
+- execute goch -l for all packages; probably with a lower param like 0.5.
+- check orthography with gospell
+- Write release notes in doc/relnotes.
+- Update README.md
+- xb copyright . in xz directory to ensure all new files have Copyright
+  header
+- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
+  version files
+- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+- Update TODO.md - write short log entry
+- git checkout master && git merge dev
+- git tag -a <version>
+- git push
+
+## Log
+
+### 2017-06-05
+
+Release v0.5.4 fixes issue #15, another problem with the padding size
+check for the xz block header. I removed the check completely.
+
+### 2017-02-15
+
+Release v0.5.3 fixes issue #12 regarding the decompression of an empty
+XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
+
+### 2016-12-02
+
+Release v0.5.2 became necessary to allow the decoding of xz files with
+4-byte padding in the block header. Many thanks to Greg, who reported
+the issue.
+
+### 2016-07-23
+
+Release v0.5.1 became necessary to fix problems with 32-bit platforms.
+Many thanks to Bruno Brigas, who reported the issue.
+
+### 2016-07-04
+
+Release v0.5 provides improvements to the compressor and provides support for
+the decompression of xz files with multiple xz streams.
+
+### 2016-01-31
+
+Another compression rate increase by checking the byte at the length of
+the best match first, before checking the whole prefix. This makes the
+compressor even faster. We now have a large time budget to beat the
+compression ratio of the xz tool. For enwik8 we now have over 40 seconds
+to reduce the compressed file size by another 7 MiB.
+
+### 2016-01-30
+
+I simplified the encoder. Speed and compression rate increased
+dramatically. A high compression rate also affects the decompression
+speed. The approach with the buffer and optimizing for operation
+compression rate has not been successful. Going for the maximum length
+appears to be the best approach.
+
+### 2016-01-28
+
+The release v0.4 is ready. It provides a working xz implementation,
+which is rather slow, but works and is interoperable with the xz tool.
+It is an important milestone.
+
+### 2016-01-10
+
+I have the first working implementation of an xz reader and writer. I'm
+happy about reaching this milestone.
+
+### 2015-12-02
+
+I'm now ready to implement xz because I have a working LZMA2
+implementation. I decided today that v0.4 will use the slow encoder
+using the operations buffer, to be able to go back if I intend to do so.
+
+### 2015-10-21
+
+I have restarted the work on the library. While trying to implement
+LZMA2, I discovered that I need to resimplify the encoder and decoder
+functions. The option approach is too complicated. Using a limited byte
+writer, not caring about written bytes at all, and not trying to handle
+uncompressed data simplifies the LZMA encoder and decoder considerably.
+Processing uncompressed data and handling limits is a feature of the
+LZMA2 format, not of LZMA.
+
+I learned an interesting method from the LZO format.
+If the last copy is too far away, they move the head by 2 bytes
+instead of 1 byte to reduce processing time.
+
+### 2015-08-26
+
+I have now reimplemented the lzma package. The code is reasonably fast,
+but can still be optimized. The next step is to implement LZMA2 and then
+xz.
+
+### 2015-07-05
+
+Created release v0.3. The version is the foundation for a full xz
+implementation that is the target of v0.4.
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for fully compatible support of gzip's and lzma's options. It
+now seems to work quite nicely.
+
+### 2015-06-05
+
+The overflow issue was interesting to research; as usual, Henry S. Warren
+Jr.'s Hacker's Delight book was very helpful and explained the issue
+perfectly. Fefe's information on his website was based on the C FAQ and
+quite bad, because it didn't address the issue of -MININT == MININT.
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. Using an 8 MByte buffer,
+the compression rate was not as good as for xz, but already better
+than gzip's default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should include
+the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb now contains the basic implementation for creating or
+reading LZMA byte streams. It enables the implementation of the
+DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Yesterday I completed the lzbase classes. I'm a little bit concerned
+that using the components may require too much code, but on the other
+hand there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current status of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time around
+we simplify it by ignoring size limits. These can be added by wrappers
+around the Reader and Writer interfaces. The Parameters type isn't
+needed anymore.
+
+However I will implement a ReaderState and WriterState type to use
+static typing to ensure the right State object is combined with the
+right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that the state for reading is only used by readers and WriterState is
+only used by Writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I already implemented
+the type State that replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago is using it now. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec was copied to lzma2 yesterday. opCodec has a high
+number of dependencies on other files in lzma2. Therefore I had to copy
+almost all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" presents the idea that, by using an embedded field E in a
+type T, all the methods of E become defined on T. If E is an interface,
+T satisfies E.
+
+https://talks.golang.org/2014/go4java.slide#51
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4G file
+   - compression worked correctly; tested decompression with lzma
+   - decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+- Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 00000000000..fadc1a59449
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,74 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little endian representation to an uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflowU64 indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
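+// As a worked illustration (not upstream documentation): putUvarint above
+// encodes x = 300 (binary 100101100) as the two bytes 0xac, 0x02; the first
+// byte carries the low seven bits plus a continuation flag, the second the
+// remaining bits, and readUvarint reverses the process.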
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if b < 0x80 {
+			if i > 10 || i == 10 && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/format.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// allZeros checks whether data is all zeros.
+func allZeros(data []byte) bool {
+	for _, c := range data {
+		if c != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding that makes n a multiple of
+// four.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+	CRC32  byte = 0x1
+	CRC64       = 0x4
+	SHA256      = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
+func verifyFlags(flags byte) error {
+	switch flags {
+	case CRC32, CRC64, SHA256:
+		return nil
+	default:
+		return errInvalidFlags
+	}
+}
+
+// flagstrings maps flag values to strings.
+var flagstrings = map[byte]string{
+	CRC32:  "CRC-32",
+	CRC64:  "CRC-64",
+	SHA256: "SHA-256",
+}
+
+// flagString returns the string representation for the given flags.
+func flagString(flags byte) string {
+	s, ok := flagstrings[flags]
+	if !ok {
+		return "invalid"
+	}
+	return s
+}
+
+// newHashFunc returns a function that creates hash instances for the
+// hash method encoded in flags.
+func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
+	switch flags {
+	case CRC32:
+		newHash = newCRC32
+	case CRC64:
+		newHash = newCRC64
+	case SHA256:
+		newHash = sha256.New
+	default:
+		err = errInvalidFlags
+	}
+	return
+}
+
+// header provides the actual content of the xz file header: the flags.
+type header struct {
+	flags byte
+}
+
+// Errors returned by readHeader.
+var errHeaderMagic = errors.New("xz: invalid header magic bytes")
+
+// ValidHeader checks whether data is a correct xz file header. The
+// length of data must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	err := h.UnmarshalBinary(data)
+	return err == nil
+}
+
+// String returns a string representation of the flags.
+func (h header) String() string {
+	return flagString(h.flags)
+}
+
+// UnmarshalBinary reads header from the provided data slice.
+func (h *header) UnmarshalBinary(data []byte) error {
+	// header length
+	if len(data) != HeaderLen {
+		return errors.New("xz: wrong file header length")
+	}
+
+	// magic header
+	if !bytes.Equal(headerMagic, data[:6]) {
+		return errHeaderMagic
+	}
+
+	// checksum
+	crc := crc32.NewIEEE()
+	crc.Write(data[6:8])
+	if uint32LE(data[8:]) != crc.Sum32() {
+		return errors.New("xz: invalid checksum for file header")
+	}
+
+	// stream flags
+	if data[6] != 0 {
+		return errInvalidFlags
+	}
+	flags := data[7]
+	if err := verifyFlags(flags); err != nil {
+		return err
+	}
+
+	h.flags = flags
+	return nil
+}
+
+// MarshalBinary generates the xz file header.
+func (h *header) MarshalBinary() (data []byte, err error) {
+	if err = verifyFlags(h.flags); err != nil {
+		return nil, err
+	}
+
+	data = make([]byte, 12)
+	copy(data, headerMagic)
+	data[7] = h.flags
+
+	crc := crc32.NewIEEE()
+	crc.Write(data[6:8])
+	putUint32LE(data[8:], crc.Sum32())
+
+	return data, nil
+}
+
+/*** Footer ***/
+
+// footerLen defines the length of the footer.
+const footerLen = 12
+
+// footerMagic contains the footer magic bytes.
+var footerMagic = []byte{'Y', 'Z'}
+
+// footer represents the content of the xz file footer.
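+//
+// The 12-byte layout, as implemented by MarshalBinary and
+// UnmarshalBinary below, is: bytes 0-3 hold a CRC-32 over bytes 4-9,
+// bytes 4-7 hold the backward size as the little-endian value
+// indexSize/4 - 1, byte 8 must be zero, byte 9 holds the stream flags,
+// and bytes 10-11 hold the footer magic "YZ".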
+type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. 
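+//
+// The first byte is the size field: a value s encodes a total header
+// length of (s+1)*4 bytes, while a zero byte marks the index rather
+// than a block header and is reported as errIndexIndicator.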
+func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
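+//
+// The layout produced below is: size byte, flags byte, the optional
+// compressed and uncompressed sizes as uvarints, the marshalled
+// filters, zero padding up to a multiple of four bytes and a trailing
+// CRC-32 computed over everything before it.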
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. 
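+//
+// Each filter is written in its MarshalBinary form; since only the
+// LZMA2 filter is currently supported (see readFilters above), the
+// loop effectively emits a single filter record.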
+func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
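+//
+// Mirroring writeIndex above, the body consists of the number of
+// records as a uvarint, the records themselves, zero padding up to a
+// four-byte boundary and a little-endian CRC-32 over all of it,
+// including the already consumed indicator byte.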
+func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 00000000000..4b820bd5a16 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox.xz differ diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 00000000000..a32887872e1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
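+// Each byte value is mapped to a fixed random 64-bit value, the
+// substitution table of the cyclic polynomial (Buzhash) scheme used by
+// RollByte above. A minimal usage sketch with illustrative values:
+//
+//	r := NewCyclicPoly(4)
+//	var h uint64
+//	for _, c := range []byte("abcd") {
+//		h = r.RollByte(c)
+//	}
+//	// h is now the hash of "abcd"; the next RollByte drops 'a'.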
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 
0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 00000000000..f99ec220680 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 00000000000..58635b113a9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. +const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. 
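+//
+// With window length n and constant A, the hash maintained by RollByte
+// below is p[0]*A^n + p[1]*A^(n-1) + ... + p[n-1]*A in uint64
+// arithmetic; rolling subtracts the oldest byte times aOldest = A^n,
+// adds the new byte and multiplies by A.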
+func NewRabinKarp(n int) *RabinKarp {
+	return NewRabinKarpConst(n, A)
+}
+
+// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The argument a provides the
+// constant used to compute the hash.
+func NewRabinKarpConst(n int, a uint64) *RabinKarp {
+	if n <= 0 {
+		panic("number of bytes n must be positive")
+	}
+	aOldest := uint64(1)
+	// There are faster methods. For the small n required by the LZMA
+	// compressor O(n) is sufficient.
+	for i := 0; i < n; i++ {
+		aOldest *= a
+	}
+	return &RabinKarp{
+		A: a, aOldest: aOldest,
+		p: make([]byte, 0, n),
+	}
+}
+
+// Len returns the length of the byte sequence.
+func (r *RabinKarp) Len() int {
+	return cap(r.p)
+}
+
+// RollByte computes the hash after x has been added.
+func (r *RabinKarp) RollByte(x byte) uint64 {
+	if len(r.p) < cap(r.p) {
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p = append(r.p, x)
+	} else {
+		r.h -= uint64(r.p[r.i]) * r.aOldest
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p[r.i] = x
+		r.i = (r.i + 1) % cap(r.p)
+	}
+	return r.h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
new file mode 100644
index 00000000000..ab6a19ca4cc
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
@@ -0,0 +1,29 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// Roller provides an interface for rolling hashes. The hash value will become
+// valid after RollByte has been called Len times.
+type Roller interface {
+	Len() int
+	RollByte(x byte) uint64
+}
+
+// Hashes computes all hash values for the array p. Note that the state of the
+// roller is changed.
+func Hashes(r Roller, p []byte) []uint64 {
+	n := r.Len()
+	if len(p) < n {
+		return nil
+	}
+	h := make([]uint64, len(p)-n+1)
+	for i := 0; i < n-1; i++ {
+		r.RollByte(p[i])
+	}
+	for i := range h {
+		h[i] = r.RollByte(p[i+n-1])
+	}
+	return h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
new file mode 100644
index 00000000000..0ba45e8ff33
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
@@ -0,0 +1,457 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xlog provides a simple logging package that allows disabling
+// certain message categories. It defines a type, Logger, with multiple
+// methods for formatting output. The package also has a predefined
+// 'standard' Logger accessible through the helper functions
+// Print[f|ln], Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln],
+// which are easier to use than creating a Logger manually. That logger
+// writes to standard error and prints the date and time of each logged
+// message, which can be configured using the function SetFlags.
+//
+// The Fatal functions call os.Exit(1) after the message is output; the
+// flags can suppress the output but not the exit. Likewise the Panic
+// functions call panic after writing the log message, whether or not
+// the output was suppressed.
+package xlog
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+// The flags define what information is prefixed to each log entry
+// generated by the Logger. The Lno* versions allow the suppression of
+// specific output.
The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) + if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) 
+ } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) 
+ os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) 
+} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 00000000000..a781bd1953d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. 
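+//
+// Concretely, null is the all-ones value math.MaxUint32; newBinTree
+// below therefore rejects capacities that reach 2^32 - 1.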
+const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. 
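+//
+// In other words, a is the node with the largest value smaller than x
+// seen on the way down, and b the node with the smallest value greater
+// than or equal to x; either may be null if no such node exists.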
+func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 00000000000..e9bab019901 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
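+//
+// The trick in ntz32 below: x & -x isolates the lowest set bit,
+// multiplying by the de Bruijn constant places a pattern unique to
+// that bit position into the top five bits, and the table maps those
+// five bits (x >> 27) back to the number of trailing zeros. For
+// example, ntz32(20) isolates bit 2 and yields 2.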
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 00000000000..5350d814fa7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 00000000000..50e0b6d57b5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. 
The function takes care of wrapping the index as +// well as potential overflow situations. +func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. +func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. +func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 00000000000..a3696ba08be --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. 
The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 00000000000..16e14db3941 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
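+//
+// The decoded bits select the operation roughly as follows:
+//
+//	isMatch == 0                   -> literal
+//	isRep == 0                     -> simple match (length, distance)
+//	isRepG0 == 0, isRepG0Long == 0 -> short rep (length 1 at rep[0])
+//	otherwise                      -> rep match at one of rep[0..3]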
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
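+//
+// The available-space check in the loop uses maxMatchLen because a
+// single decoded operation can write at most maxMatchLen (273) bytes
+// into the dictionary.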
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 00000000000..564a12b834c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 00000000000..e08eb989ff9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. 
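+//
+// For example, directCodec(3) encodes the value v == 5 (binary 101) as
+// the bit sequence 1, 0, 1.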
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 00000000000..b053a2dce21 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
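+//
+// Encoding proceeds in up to three stages: the position slot is always
+// coded with a bit tree; for slots 4 to 13 the remaining distance bits
+// use a reverse bit tree, while for slots 14 to 63 they are coded as
+// direct bits followed by four reverse-coded align bits.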
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 00000000000..18ce0099282 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 10 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
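+//
+// The encoder reserves a margin of opLenMargin (10) bytes per
+// operation, plus another 5 bytes when an end-of-stream marker is
+// requested, so that the stream can always be closed within the limit
+// of a LimitedByteWriter.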
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + 
if err != nil {
+				return err
+			}
+			if b == 1 {
+				e.state.rep[3] = e.state.rep[2]
+			}
+			e.state.rep[2] = e.state.rep[1]
+		}
+		e.state.rep[1] = e.state.rep[0]
+		e.state.rep[0] = dist
+	}
+	e.state.updateStateRep()
+	return e.state.repLenCodec.Encode(e.re, n, posState)
+}
+
+// writeOp writes a single operation to the range encoder. The function
+// checks whether there is enough space available to close the LZMA
+// stream.
+func (e *encoder) writeOp(op operation) error {
+	if e.re.Available() < int64(e.margin) {
+		return ErrLimit
+	}
+	switch x := op.(type) {
+	case lit:
+		return e.writeLiteral(x)
+	case match:
+		return e.writeMatch(x)
+	default:
+		panic("unexpected operation")
+	}
+}
+
+// compress compresses data from the dictionary buffer. If the flag all
+// is set, all data in the dictionary buffer will be compressed. The
+// function returns ErrLimit if the underlying writer has reached its
+// limit.
+func (e *encoder) compress(flags compressFlags) error {
+	n := 0
+	if flags&all == 0 {
+		n = maxMatchLen - 1
+	}
+	d := e.dict
+	m := d.m
+	for d.Buffered() > n {
+		op := m.NextOp(e.state.rep)
+		if err := e.writeOp(op); err != nil {
+			return err
+		}
+		d.Discard(op.Len())
+	}
+	return nil
+}
+
+// eosMatch is a pseudo operation that indicates the end of the stream.
+var eosMatch = match{distance: maxDistance, n: minMatchLen}
+
+// Close terminates the LZMA stream. If requested the end-of-stream
+// marker will be written. If the byte writer limit has been or will be
+// reached during compression of the remaining data in the buffer the
+// LZMA stream will be closed and data will remain in the buffer.
+func (e *encoder) Close() error {
+	err := e.compress(all)
+	if err != nil && err != ErrLimit {
+		return err
+	}
+	if e.marker {
+		if err := e.writeMatch(eosMatch); err != nil {
+			return err
+		}
+	}
+	err = e.re.Close()
+	return err
+}
+
+// Compressed returns the number of bytes of the input data that have
+// been compressed.
+func (e *encoder) Compressed() int64 {
+	return e.dict.Pos() - e.start
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 00000000000..9d0fbc70332
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// matcher is an interface that supports the identification of the next
+// operation.
+type matcher interface {
+	io.Writer
+	SetDict(d *encoderDict)
+	NextOp(rep [4]uint32) operation
+}
+
+// encoderDict provides the dictionary of the encoder. It includes an
+// additional buffer on top of the actual dictionary.
+type encoderDict struct {
+	buf      buffer
+	m        matcher
+	head     int64
+	capacity int
+	// preallocated array
+	data [maxMatchLen]byte
+}
+
+// newEncoderDict creates the encoder dictionary. The argument bufSize
+// defines the size of the additional buffer.
+func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
+	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
+		return nil, errors.New(
+			"lzma: dictionary capacity out of range")
+	}
+	if bufSize < 1 {
+		return nil, errors.New(
+			"lzma: buffer size must be larger than zero")
+	}
+	d = &encoderDict{
+		buf:      *newBuffer(dictCap + bufSize),
+		capacity: dictCap,
+		m:        m,
+	}
+	m.SetDict(d)
+	return d, nil
+}
+
+// Discard discards n bytes.
Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 00000000000..5edad633266 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma differ diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 00000000000..d786a9745d4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. 
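+// NextOp always tries the distances 1 through shortDists first, before
+// any distance derived from the hash table.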
+const shortDists = 8
+
+// The minimum is somewhat arbitrary but the maximum is limited by the
+// memory requirements of the hash table.
+const (
+	minTableExponent = 9
+	maxTableExponent = 20
+)
+
+// newRoller contains the function used to create an instance of the
+// hash.Roller.
+var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }
+
+// hashTable stores the hash table including the rolling hash method.
+//
+// We implement chained hashing into a circular buffer. Each entry in
+// the circular buffer stores the delta distance to the next position with a
+// word that has the same hash value.
+type hashTable struct {
+	dict *encoderDict
+	// actual hash table
+	t []int64
+	// circular list data with the offset to the next word
+	data  []uint32
+	front int
+	// mask for computing the index for the hash table
+	mask uint64
+	// hash offset; initial value is -int64(wordLen)
+	hoff int64
+	// length of the hashed word
+	wordLen int
+	// hash roller for computing the hash values for the Write
+	// method
+	wr hash.Roller
+	// hash roller for computing arbitrary hashes
+	hr hash.Roller
+	// preallocated slices
+	p         [maxMatches]int64
+	distances [maxMatches + shortDists]int
+}
+
+// hashTableExponent derives the hash table exponent from the dictionary
+// capacity.
+func hashTableExponent(n uint32) int {
+	e := 30 - nlz32(n)
+	switch {
+	case e < minTableExponent:
+		e = minTableExponent
+	case e > maxTableExponent:
+		e = maxTableExponent
+	}
+	return e
+}
+
+// newHashTable creates a new hash table for words of length wordLen.
+func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
+	if !(0 < capacity) {
+		return nil, errors.New(
+			"newHashTable: capacity must be positive")
+	}
+	exp := hashTableExponent(uint32(capacity))
+	if !(1 <= wordLen && wordLen <= 4) {
+		return nil, errors.New("newHashTable: " +
+			"argument wordLen out of range")
+	}
+	n := 1 << uint(exp)
+	if n <= 0 {
+		panic("newHashTable: exponent is too large")
+	}
+	t = &hashTable{
+		t:       make([]int64, n),
+		data:    make([]uint32, capacity),
+		mask:    (uint64(1) << uint(exp)) - 1,
+		hoff:    -int64(wordLen),
+		wordLen: wordLen,
+		wr:      newRoller(wordLen),
+		hr:      newRoller(wordLen),
+	}
+	return t, nil
+}
+
+func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
+
+// buffered returns the number of bytes that are currently hashed.
+func (t *hashTable) buffered() int {
+	n := t.hoff + 1
+	switch {
+	case n <= 0:
+		return 0
+	case n >= int64(len(t.data)):
+		return len(t.data)
+	}
+	return int(n)
+}
+
+// addIndex adds n to an index ensuring that it stays inside the
+// circular buffer for the hash chain.
+func (t *hashTable) addIndex(i, n int) int {
+	i += n - len(t.data)
+	if i < 0 {
+		i += len(t.data)
+	}
+	return i
+}
+
+// putDelta puts the delta value at the current front of the circular
+// chain buffer.
+func (t *hashTable) putDelta(delta uint32) {
+	t.data[t.front] = delta
+	t.front = t.addIndex(t.front, 1)
+}
+
+// putEntry puts a new entry into the hash table. If there is already a
+// value stored it is moved into the circular chain buffer.
+func (t *hashTable) putEntry(h uint64, pos int64) {
+	if pos < 0 {
+		return
+	}
+	i := h & t.mask
+	old := t.t[i] - 1
+	t.t[i] = pos + 1
+	var delta int64
+	if old >= 0 {
+		delta = pos - old
+		if delta > 1<<32-1 || delta > int64(t.buffered()) {
+			delta = 0
+		}
+	}
+	t.putDelta(uint32(delta))
+}
+
+// WriteByte converts a single byte into a hash and puts it into the hash
+// table.
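+// The hash offset hoff starts at -wordLen, so entries are only recorded
+// once a full word has been rolled in; putEntry ignores the negative
+// positions seen before that point.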
+func (t *hashTable) WriteByte(b byte) error {
+	h := t.wr.RollByte(b)
+	t.hoff++
+	t.putEntry(h, t.hoff)
+	return nil
+}
+
+// Write hashes the bytes provided and stores the abbreviated offsets
+// into the hash table. The method will never return an error.
+func (t *hashTable) Write(p []byte) (n int, err error) {
+	for _, b := range p {
+		// WriteByte doesn't generate an error.
+		t.WriteByte(b)
+	}
+	return len(p), nil
+}
+
+// getMatches collects the matches for a specific hash. The function
+// returns the number of positions found.
+//
+// TODO: Make a getDistances function, because distances are what we are
+// actually interested in.
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
+	if t.hoff < 0 || len(positions) == 0 {
+		return 0
+	}
+	buffered := t.buffered()
+	tailPos := t.hoff + 1 - int64(buffered)
+	rear := t.front - buffered
+	if rear >= 0 {
+		rear -= len(t.data)
+	}
+	// get the slot for the hash
+	pos := t.t[h&t.mask] - 1
+	delta := pos - tailPos
+	for {
+		if delta < 0 {
+			return n
+		}
+		positions[n] = tailPos + delta
+		n++
+		if n >= len(positions) {
+			return n
+		}
+		i := rear + int(delta)
+		if i < 0 {
+			i += len(t.data)
+		}
+		u := t.data[i]
+		if u == 0 {
+			return n
+		}
+		delta -= int64(u)
+	}
+}
+
+// hash computes the rolling hash for the word stored in p. For correct
+// results its length must be equal to t.wordLen.
+func (t *hashTable) hash(p []byte) uint64 {
+	var h uint64
+	for _, b := range p {
+		h = t.hr.RollByte(b)
+	}
+	return h
+}
+
+// Matches fills the positions slice with potential matches. The
+// function returns the number of entries filled into positions. The
+// byte slice p must have the word length of the hash table.
+func (t *hashTable) Matches(p []byte, positions []int64) int {
+	if len(p) != t.wordLen {
+		panic(fmt.Errorf(
+			"byte slice must have length %d", t.wordLen))
+	}
+	h := t.hash(p)
+	return t.getMatches(h, positions)
+}
+
+// NextOp identifies the next operation using the hash table.
+//
+// TODO: Use all repetitions to find matches.
+func (t *hashTable) NextOp(rep [4]uint32) operation {
+	// get positions
+	data := t.dict.data[:maxMatchLen]
+	n, _ := t.dict.buf.Peek(data)
+	data = data[:n]
+	var p []int64
+	if n < t.wordLen {
+		p = t.p[:0]
+	} else {
+		p = t.p[:maxMatches]
+		n = t.Matches(data[:t.wordLen], p)
+		p = p[:n]
+	}
+
+	// convert positions into potential distances
+	head := t.dict.head
+	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
+	for _, pos := range p {
+		dis := int(head - pos)
+		if dis > shortDists {
+			dists = append(dists, dis)
+		}
+	}
+
+	// check distances
+	var m match
+	dictLen := t.dict.DictLen()
+	for _, dist := range dists {
+		if dist > dictLen {
+			continue
+		}
+
+		// Here comes a trick. We are only interested in matches
+		// that are longer than the matches we have found before.
+		// So before we test the whole byte sequence at the given
+		// distance, we test the first byte that would make the
+		// match longer. If it doesn't match, we don't need to
+		// look at this distance any longer.
+		i := t.dict.buf.rear - dist + m.n
+		if i < 0 {
+			i += len(t.dict.buf.data)
+		}
+		if t.dict.buf.data[i] != data[m.n] {
+			// We can't get a longer match. Jump to the next
+			// distance.
+			continue
+		}
+
+		n := t.dict.buf.matchLen(dist, data)
+		switch n {
+		case 0:
+			continue
+		case 1:
+			if uint32(dist-minDistance) != rep[0] {
+				continue
+			}
+		}
+		if n > m.n {
+			m = match{int64(dist), n}
+			if n == len(data) {
+				// No better match will be found.
+ break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 00000000000..bc708969fd3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. +func (h *header) unmarshalBinary(data []byte) error { + if len(data) != HeaderLen { + return errors.New("lzma.unmarshalBinary: data has wrong length") + } + + // properties + var err error + if h.properties, err = PropertiesForCode(data[0]); err != nil { + return err + } + + // dictionary capacity + h.dictCap = int(uint32LE(data[1:])) + if h.dictCap < 0 { + return errors.New( + "LZMA header: dictionary capacity exceeds maximum " + + "integer") + } + + // uncompressed size + s := uint64LE(data[5:]) + if s == noHeaderSize { + h.size = -1 + } else { + h.size = int64(s) + if h.size < 0 { + return errors.New( + "LZMA header: uncompressed size " + + "out of int64 range") + } + } + + return nil +} + +// validDictCap checks whether the dictionary capacity is correct. This +// is used to weed out wrong file headers. 
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	if err := h.unmarshalBinary(data); err != nil {
+		return false
+	}
+	if !validDictCap(h.dictCap) {
+		return false
+	}
+	return h.size < 0 || h.size <= 1<<38
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 00000000000..ac6a71a5a98
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+const (
+	// maximum size of compressed data in a chunk
+	maxCompressed = 1 << 16
+	// maximum size of uncompressed data in a chunk
+	maxUncompressed = 1 << 21
+)
+
+// chunkType represents the type of an LZMA2 chunk. Note that this
+// value is an internal representation and not the actual encoding of a
+// LZMA2 chunk header.
+type chunkType byte
+
+// Possible values for the chunk type.
+const (
+	// end of stream
+	cEOS chunkType = iota
+	// uncompressed; reset dictionary
+	cUD
+	// uncompressed; no reset of dictionary
+	cU
+	// LZMA compressed; no reset
+	cL
+	// LZMA compressed; reset state
+	cLR
+	// LZMA compressed; reset state; new property value
+	cLRN
+	// LZMA compressed; reset state; new property value; reset dictionary
+	cLRND
+)
+
+// chunkTypeStrings provides a string representation for the chunk types.
+var chunkTypeStrings = [...]string{
+	cEOS:  "EOS",
+	cU:    "U",
+	cUD:   "UD",
+	cL:    "L",
+	cLR:   "LR",
+	cLRN:  "LRN",
+	cLRND: "LRND",
+}
+
+// String returns a string representation of the chunk type.
+func (c chunkType) String() string {
+	if !(cEOS <= c && c <= cLRND) {
+		return "unknown"
+	}
+	return chunkTypeStrings[c]
+}
+
+// Actual encodings for the chunk types in the value. Note that the high
+// uncompressed size bits are stored in the header byte additionally.
+const (
+	hEOS  = 0
+	hUD   = 1
+	hU    = 2
+	hL    = 1 << 7
+	hLR   = 1<<7 | 1<<5
+	hLRN  = 1<<7 | 1<<6
+	hLRND = 1<<7 | 1<<6 | 1<<5
+)
+
+// errHeaderByte indicates an unsupported value for the chunk header
+// byte. These bytes start the variable-length chunk header.
+var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
+
+// headerChunkType converts the header byte into a chunk type. It
+// ignores the uncompressed size bits in the chunk header byte.
+func headerChunkType(h byte) (c chunkType, err error) {
+	if h&hL == 0 {
+		// no compression
+		switch h {
+		case hEOS:
+			c = cEOS
+		case hUD:
+			c = cUD
+		case hU:
+			c = cU
+		default:
+			return 0, errHeaderByte
+		}
+		return
+	}
+	switch h & hLRND {
+	case hL:
+		c = cL
+	case hLR:
+		c = cLR
+	case hLRN:
+		c = cLRN
+	case hLRND:
+		c = cLRND
+	default:
+		return 0, errHeaderByte
+	}
+	return
+}
+
+// uncompressedHeaderLen provides the length of an uncompressed header.
+const uncompressedHeaderLen = 3
+
+// headerLen returns the length of the LZMA2 header for a given chunk
+// type.
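+// A cLRND chunk needs the full 6 bytes: the header byte (which also
+// carries the high bits of the uncompressed size), two more
+// uncompressed-size bytes, two compressed-size bytes, and the
+// properties byte.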
+func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
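+// For example, putUint16BE(p, 0x1234) stores 0x12 in p[0] and 0x34 in
+// p[1].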
+func putUint16BE(p []byte, x uint16) {
+	p[0] = byte(x >> 8)
+	p[1] = byte(x)
+}
+
+// chunkState is used to manage the state of the chunks.
+type chunkState byte
+
+// start and stop define the initial and terminating state of the chunk
+// state.
+const (
+	start chunkState = 'S'
+	stop             = 'T'
+)
+
+// errors for the chunk state handling
+var (
+	errChunkType = errors.New("lzma: unexpected chunk type")
+	errState     = errors.New("lzma: wrong chunk state")
+)
+
+// next transitions the state based on the chunk type input.
+func (c *chunkState) next(ctype chunkType) error {
+	switch *c {
+	// start state
+	case 'S':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// normal LZMA mode
+	case 'L':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cU:
+			*c = 'U'
+		case cL, cLR, cLRN, cLRND:
+			break
+		default:
+			return errChunkType
+		}
+	// reset required
+	case 'R':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD, cU:
+			break
+		case cLRN, cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// uncompressed
+	case 'U':
+		switch ctype {
+		case cEOS:
+			*c = 'T'
+		case cUD:
+			*c = 'R'
+		case cU:
+			break
+		case cL, cLR, cLRN, cLRND:
+			*c = 'L'
+		default:
+			return errChunkType
+		}
+	// terminal state
+	case 'T':
+		return errChunkType
+	default:
+		return errState
+	}
+	return nil
+}
+
+// defaultChunkType returns the default chunk type for each chunk state.
+func (c chunkState) defaultChunkType() chunkType {
+	switch c {
+	case 'S':
+		return cLRND
+	case 'L', 'U':
+		return cL
+	case 'R':
+		return cLRN
+	default:
+		// no error
+		return cEOS
+	}
+}
+
+// maxDictCap defines the maximum dictionary capacity supported by the
+// LZMA2 dictionary capacity encoding.
+const maxDictCap = 1<<32 - 1
+
+// maxDictCapCode defines the maximum dictionary capacity code.
+const maxDictCapCode = 40
+
+// The function decodes the dictionary capacity byte, but doesn't check
+// for the correct range of the given byte.
+func decodeDictCap(c byte) int64 {
+	return (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
+}
+
+// DecodeDictCap decodes the encoded dictionary capacity. The function
+// returns an error if the code is out of range.
+func DecodeDictCap(c byte) (n int64, err error) {
+	if c >= maxDictCapCode {
+		if c == maxDictCapCode {
+			return maxDictCap, nil
+		}
+		return 0, errors.New("lzma: invalid dictionary size code")
+	}
+	return decodeDictCap(c), nil
+}
+
+// EncodeDictCap encodes a dictionary capacity. The function returns the
+// code for the capacity that is greater than or equal to n. If n exceeds
+// the maximum supported dictionary capacity, the maximum value is
+// returned.
+func EncodeDictCap(n int64) byte {
+	a, b := byte(0), byte(40)
+	for a < b {
+		c := a + (b-a)>>1
+		m := decodeDictCap(c)
+		if n <= m {
+			if n == m {
+				return c
+			}
+			b = c
+		} else {
+			a = c + 1
+		}
+	}
+	return a
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 00000000000..e517730924f
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,129 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// maxPosBits defines the number of bits of the position value that are
+// used to compute the posState value. The value is used to select the
+// tree codec for length encoding and decoding.
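+// With maxPosBits == 4 there are at most 1<<4 == 16 posState values,
+// which is why lengthCodec keeps 16 low and 16 mid bit trees.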
+const maxPosBits = 4
+
+// minMatchLen and maxMatchLen give the minimum and maximum values for
+// encoding and decoding length values. minMatchLen is also used as base
+// for the encoded length values.
+const (
+	minMatchLen = 2
+	maxMatchLen = minMatchLen + 16 + 256 - 1
+)
+
+// lengthCodec supports the encoding of the length value.
+type lengthCodec struct {
+	choice [2]prob
+	low    [1 << maxPosBits]treeCodec
+	mid    [1 << maxPosBits]treeCodec
+	high   treeCodec
+}
+
+// deepcopy initializes the lc value as a deep copy of the source value.
+func (lc *lengthCodec) deepcopy(src *lengthCodec) {
+	if lc == src {
+		return
+	}
+	lc.choice = src.choice
+	for i := range lc.low {
+		lc.low[i].deepcopy(&src.low[i])
+	}
+	for i := range lc.mid {
+		lc.mid[i].deepcopy(&src.mid[i])
+	}
+	lc.high.deepcopy(&src.high)
+}
+
+// init initializes a new length codec.
+func (lc *lengthCodec) init() {
+	for i := range lc.choice {
+		lc.choice[i] = probInit
+	}
+	for i := range lc.low {
+		lc.low[i] = makeTreeCodec(3)
+	}
+	for i := range lc.mid {
+		lc.mid[i] = makeTreeCodec(3)
+	}
+	lc.high = makeTreeCodec(8)
+}
+
+// lBits gives the number of bits used for the encoding of the l value
+// provided to the range encoder.
+func lBits(l uint32) int {
+	switch {
+	case l < 8:
+		return 4
+	case l < 16:
+		return 5
+	default:
+		return 10
+	}
+}
+
+// Encode encodes the length offset. The length offset l can be computed
+// by subtracting minMatchLen (2) from the actual length.
+//
+//	l = length - minMatchLen
+//
+func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
+) (err error) {
+	if l > maxMatchLen-minMatchLen {
+		return errors.New("lengthCodec.Encode: l out of range")
+	}
+	if l < 8 {
+		if err = lc.choice[0].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.low[posState].Encode(e, l)
+	}
+	if err = lc.choice[0].Encode(e, 1); err != nil {
+		return
+	}
+	if l < 16 {
+		if err = lc.choice[1].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.mid[posState].Encode(e, l-8)
+	}
+	if err = lc.choice[1].Encode(e, 1); err != nil {
+		return
+	}
+	if err = lc.high.Encode(e, l-16); err != nil {
+		return
+	}
+	return nil
+}
+
+// Decode reads the length offset. Add minMatchLen to the length offset l
+// to compute the actual length.
+func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
+) (l uint32, err error) {
+	var b uint32
+	if b, err = lc.choice[0].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.low[posState].Decode(d)
+		return
+	}
+	if b, err = lc.choice[1].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.mid[posState].Decode(d)
+		l += 8
+		return
+	}
+	l, err = lc.high.Decode(d)
+	l += 16
+	return
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 00000000000..c949d6ebd12
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,132 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// literalCodec supports the encoding of literals. It provides 768
+// probability values per literal state. The upper 512 probabilities are
+// used with the context of a match bit.
+type literalCodec struct {
+	probs []prob
+}
+
+// deepcopy initializes literal codec c as a deep copy of the source.
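+// A fresh probs slice is allocated so the copy can adapt its
+// probabilities independently of the source codec.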
+func (c *literalCodec) deepcopy(src *literalCodec) {
+	if c == src {
+		return
+	}
+	c.probs = make([]prob, len(src.probs))
+	copy(c.probs, src.probs)
+}
+
+// init initializes the literal codec.
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte s using a range encoder as well as the current
+// LZMA state, a match byte, and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(s)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
+
+// minState and maxState define a range for the state values stored in
+// the State values.
+const (
+	minState = 0
+	maxState = 11
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 00000000000..4a244eb1ac7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the match algorithm.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
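+// Only the values listed in maStrings (HashTable4 and BinaryTree) pass
+// verification; any other MatchAlgorithm value is rejected.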
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+	if _, ok := maStrings[a]; !ok {
+		return errUnsupportedMatchAlgorithm
+	}
+	return nil
+}
+
+// new creates a matcher instance for the given dictionary capacity.
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+	switch a {
+	case HashTable4:
+		return newHashTable(dictCap, 4)
+	case BinaryTree:
+		return newBinTree(dictCap)
+	}
+	return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 00000000000..733bb99da41
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,80 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"unicode"
+)
+
+// operation represents an operation on the dictionary during encoding or
+// decoding.
+type operation interface {
+	Len() int
+}
+
+// match represents a match at the given distance and with the given length.
+type match struct {
+	// supports all possible distance values, including the eos marker
+	distance int64
+	// length
+	n int
+}
+
+// verify checks whether the match is valid. If that is not the case an
+// error is returned.
+func (m match) verify() error {
+	if !(minDistance <= m.distance && m.distance <= maxDistance) {
+		return errors.New("distance out of range")
+	}
+	if !(1 <= m.n && m.n <= maxMatchLen) {
+		return errors.New("length out of range")
+	}
+	return nil
+}
+
+// l returns the l-value for the match, which is the difference of the
+// length n and minMatchLen (2).
+func (m match) l() uint32 {
+	return uint32(m.n - minMatchLen)
+}
+
+// dist returns the dist value for the match, which is one less than the
+// distance stored in the match.
+func (m match) dist() uint32 {
+	return uint32(m.distance - minDistance)
+}
+
+// Len returns the number of bytes matched.
+func (m match) Len() int {
+	return m.n
+}
+
+// String returns a string representation for the match.
+func (m match) String() string {
+	return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
+}
+
+// lit represents a single byte literal.
+type lit struct {
+	b byte
+}
+
+// Len returns 1 for the single byte literal.
+func (l lit) Len() int {
+	return 1
+}
+
+// String returns a string representation for the literal.
+func (l lit) String() string {
+	var c byte
+	if unicode.IsPrint(rune(l.b)) {
+		c = l.b
+	} else {
+		c = '.'
+	}
+	return fmt.Sprintf("L{%c/%02x}", c, l.b)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go
new file mode 100644
index 00000000000..24d50ec6814
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go
@@ -0,0 +1,53 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// movebits defines the number of bits used for the updates of probability
+// values.
+const movebits = 5
+
+// probbits defines the number of bits of a probability value.
+const probbits = 11
+
+// probInit defines 0.5 as initial value for prob values.
+const probInit prob = 1 << (probbits - 1)
+
+// Type prob represents probabilities. The type can also be used to encode and
+// decode single bits.
+type prob uint16
+
+// Dec decreases the probability. The decrease is proportional to the
+// probability value.
+func (p *prob) dec() {
+	*p -= *p >> movebits
+}
+
+// Inc increases the probability.
The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 00000000000..23418e25d26 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 00000000000..6361c5e7c83 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,248 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. 
The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// init initializes the range decoder, by reading from the byte reader. +func (d *rangeDecoder) init() error { + d.nrange = 0xffffffff + d.code = 0 + + b, err := d.br.ReadByte() + if err != nil { + return err + } + if b != 0 { + return errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return err + } + } + + if d.code >= d.nrange { + return errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return nil +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. 
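+
+// Editor's note: an illustrative sketch, not part of the vendored library.
+// bound splits the current range in proportion to the probability: with a
+// full range of 0xffffffff and the initial probability probInit (0.5), the
+// lower part of the range, reserved for a 0 bit, is about half of it.
+func exampleBound() uint32 {
+	p := probInit              // 1 << 10, represents probability 0.5
+	return p.bound(0xffffffff) // 0x7ffffc00, roughly half of the range
+}
+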
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 00000000000..2ef3dcaaa9b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. 
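+
+// Editor's note: an illustrative usage sketch, not part of the vendored
+// library. It decompresses a classic LZMA stream from src into dst.
+func exampleDecompress(dst io.Writer, src io.Reader) error {
+	r, err := NewReader(src)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(dst, r)
+	return err
+}
+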
+type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + return nil, errors.New("lzma: dictionary capacity too small") + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. +func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 00000000000..a55cfaa4e3f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,232 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState + ctype chunkType +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. 
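+
+// Editor's note: an illustrative usage sketch, not part of the vendored
+// library. Only non-zero fields override the defaults, so this reader
+// uses a 16 MiB dictionary instead of the default 8 MiB.
+func exampleNewReader2(lzma2 io.Reader) (*Reader2, error) {
+	return Reader2Config{DictCap: 16 * 1024 * 1024}.NewReader2(lzma2)
+}
+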
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. +func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. 
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 00000000000..502351052fd --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,151 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// initState initializes the state. +func initState(s *state, p Properties) { + *s = state{Properties: p} + s.Reset() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. 
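+
+// Editor's note: an illustrative sketch, not part of the vendored library.
+// The state machine decays back to the literal-only states (0-6) after a
+// few literals: starting from state 7 (set by updateStateMatch), three
+// literal updates lead back to state 0.
+func exampleStateDecay() uint32 {
+	var s state
+	s.state = 7 // the value updateStateMatch assigns in a literal state
+	for i := 0; i < 3; i++ {
+		s.updateStateLiteral() // 7 -> 4 -> 1 -> 0
+	}
+	return s.state // 0
+}
+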
+func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 00000000000..504b3d78e44 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. +func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. +func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. 
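+
+// Editor's note: an illustrative sketch, not part of the vendored library.
+// treeCodec walks the probability tree starting at the most-significant
+// bit, treeReverseCodec at the least-significant bit. A tree for n bits
+// needs 1<<n probability values (index 0 stays unused).
+func exampleTreeSizes() (int, int) {
+	tc := makeTreeCodec(3)        // like the length coders' low/mid trees
+	rc := makeTreeReverseCodec(4) // like the distance alignment bits
+	return len(tc.probs), len(rc.probs) // 8, 16
+}
+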
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	for i := range t.probs {
+		t.probs[i] = probInit
+	}
+	return t
+}
+
+// Bits returns the number of bits for the probability tree.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// MinDictCap and MaxDictCap provide the range of supported dictionary
+// capacities.
+const (
+	MinDictCap = 1 << 12
+	MaxDictCap = 1<<32 - 1
+)
+
+// WriterConfig defines the configuration parameters for a writer.
+type WriterConfig struct {
+	// Properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain the size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive Size value requires
+	// SizeInHeader to be set.
+	Size int64
+	// EOSMarker requests whether the EOS marker needs to be written.
+	EOSMarker bool
+}
+
+// fill converts zero-value fields to their explicit default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors. Verify will replace zero
+// values with default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("lzma: WriterConfig is nil")
+	}
+	c.fill()
+	var err error
+	if c.Properties == nil {
+		return errors.New("lzma: WriterConfig has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.SizeInHeader {
+		if c.Size < 0 {
+			return errors.New("lzma: negative size not supported")
+		}
+	} else if !c.EOSMarker {
+		return errors.New("lzma: EOS marker is required")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// header returns the header structure for this configuration.
+func (c *WriterConfig) header() header {
+	h := header{
+		properties: *c.Properties,
+		dictCap:    c.DictCap,
+		size:       -1,
+	}
+	if c.SizeInHeader {
+		h.size = c.Size
+	}
+	return h
+}
+
+// Writer writes an LZMA stream in the classic format.
+type Writer struct {
+	h   header
+	bw  io.ByteWriter
+	buf *bufio.Writer
+	e   *encoder
+}
+
+// NewWriter creates a new LZMA writer for the classic format. The
+// method will write the header to the underlying stream.
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{h: c.header()}
+
+	var ok bool
+	w.bw, ok = lzma.(io.ByteWriter)
+	if !ok {
+		w.buf = bufio.NewWriter(lzma)
+		w.bw = w.buf
+	}
+	state := newState(w.h.properties)
+	m, err := c.Matcher.new(w.h.dictCap)
+	if err != nil {
+		return nil, err
+	}
+	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	var flags encoderFlags
+	if c.EOSMarker {
+		flags = eosMarker
+	}
+	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
+		return nil, err
+	}
+
+	if err = w.writeHeader(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// NewWriter creates a new LZMA writer using the classic format. The
+// function writes the header to the underlying stream.
+func NewWriter(lzma io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(lzma)
+}
+
+// writeHeader writes the LZMA header into the stream.
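+
+// Editor's note: an illustrative usage sketch, not part of the vendored
+// library. When the uncompressed size is known in advance it can be stored
+// in the header; fill then drops the EOS marker automatically.
+func exampleSizedWriter(out io.Writer, data []byte) error {
+	w, err := WriterConfig{
+		SizeInHeader: true,
+		Size:         int64(len(data)),
+	}.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	return w.Close()
+}
+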
+func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. +func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 00000000000..7c1afe15725 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. 
+type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. +func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. 
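+
+// Editor's note: an illustrative sketch, not part of the vendored library.
+// Flush ends the current chunk without writing the end-of-stream chunk, so
+// the output of a flushed (but not closed) Writer2 can be concatenated
+// with further LZMA2 chunks.
+func exampleFlushSegment(w *Writer2, p []byte) error {
+	if _, err := w.Write(p); err != nil {
+		return err
+	}
+	return w.Flush() // no EOS chunk; w.Close() would write one
+}
+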
+func (w *Writer2) writeCompressedChunk() error {
+	if w.ctype == cU || w.ctype == cUD {
+		panic("chunk type uncompressed")
+	}
+
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("writeCompressedChunk: empty chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	c := w.buf.Len()
+	if c <= 0 {
+		panic("no compressed data")
+	}
+	if c > maxCompressed {
+		panic("overrun of compressed data limit")
+	}
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+		compressed:   uint16(c - 1),
+		props:        w.encoder.state.Properties,
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = io.Copy(w.w, &w.buf)
+	return err
+}
+
+// writeChunk writes a single chunk to the underlying writer.
+func (w *Writer2) writeChunk() error {
+	u := int(uncompressedHeaderLen + w.encoder.Compressed())
+	c := headerLen(w.ctype) + w.buf.Len()
+	if u < c {
+		return w.writeUncompressedChunk()
+	}
+	return w.writeCompressedChunk()
+}
+
+// flushChunk terminates the current chunk. The encoder will be reset
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// could result in multiple chunks being created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 00000000000..69cf5f7c279
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded representation.
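+
+// Editor's note: an illustrative sketch, not part of the vendored library.
+// The LZMA2 filter occupies three bytes in an xz block header: the filter
+// ID 0x21, the property size 1, and one byte encoding the dictionary
+// capacity.
+func exampleFilterBytes() ([]byte, error) {
+	f := lzmaFilter{dictCap: 8 * 1024 * 1024}
+	return f.MarshalBinary() // [0x21, 0x01, <dict cap code>]
+}
+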
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. +func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100755 index 00000000000..a8c612ce172 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 00000000000..0634c6bcc0c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,373 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// fill replaces all zero values with their default values. 
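+
+// Editor's note: an illustrative usage sketch, not part of the vendored
+// library. With SingleStream set the reader reports an error if any data
+// follows the first xz stream.
+func exampleStrictReader(src io.Reader) (*Reader, error) {
+	return ReaderConfig{SingleStream: true}.NewReader(src)
+}
+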
+func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. +type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// errIndex indicates an error with the xz file index. +var errIndex = errors.New("xz: error in xz file index") + +// readTail reads the index body and the xz footer. 
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
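+
+// Editor's note: an illustrative sketch, not part of the vendored library.
+// The compressed data of a block is padded with zero bytes to a multiple
+// of four before the checksum; padLen computes the required pad length.
+func examplePadding() int {
+	return padLen(21) // 3, since 21 bytes are padded up to 24
+}
+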
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 00000000000..c126f70995d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,386 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 + CheckSum byte + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
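+
+// Editor's note: an illustrative usage sketch, not part of the vendored
+// library; it assumes the SHA256 check-sum constant named in the
+// WriterConfig docs above. The CheckSum field selects the block checksum,
+// with CRC64 as the default.
+func exampleSHA256Writer(out io.Writer) (*Writer, error) {
+	return WriterConfig{CheckSum: SHA256}.NewWriter(out)
+}
+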
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. 
+// Close closes the writer and writes the index and stream footer. Close
+// doesn't close the underlying writer.
+func (w *Writer) Close() error {
+	if w.closed {
+		return errClosed
+	}
+	w.closed = true
+	var err error
+	if err = w.closeBlockWriter(); err != nil {
+		return err
+	}
+
+	f := footer{flags: w.h.flags}
+	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
+		return err
+	}
+	data, err := f.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.xz.Write(data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// countingWriter is a writer that counts all data written to it.
+type countingWriter struct {
+	w io.Writer
+	n int64
+}
+
+// Write writes data to the countingWriter.
+func (cw *countingWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.w.Write(p)
+	cw.n += int64(n)
+	if err == nil && cw.n < 0 {
+		return n, errors.New("xz: counter overflow")
+	}
+	return
+}
+
+// blockWriter writes a single block.
+type blockWriter struct {
+	cxz countingWriter
+	// mw combines io.WriteCloser w and the hash.
+	mw        io.Writer
+	w         io.WriteCloser
+	n         int64
+	blockSize int64
+	closed    bool
+	headerLen int
+
+	filters []filter
+	hash    hash.Hash
+}
+
+// newBlockWriter creates a new block writer.
+func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
+	bw = &blockWriter{
+		cxz:       countingWriter{w: xz},
+		blockSize: c.BlockSize,
+		filters:   c.filters(),
+		hash:      hash,
+	}
+	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
+	if err != nil {
+		return nil, err
+	}
+	bw.mw = io.MultiWriter(bw.w, bw.hash)
+	return bw, nil
+}
+
+// writeHeader writes the header. If the function is called after Close
+// the compressedSize and uncompressedSize fields will be filled.
+func (bw *blockWriter) writeHeader(w io.Writer) error {
+	h := blockHeader{
+		compressedSize:   -1,
+		uncompressedSize: -1,
+		filters:          bw.filters,
+	}
+	if bw.closed {
+		h.compressedSize = bw.compressedSize()
+		h.uncompressedSize = bw.uncompressedSize()
+	}
+	data, err := h.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	bw.headerLen = len(data)
+	return nil
+}
+
+// compressedSize returns the amount of data written to the underlying
+// stream.
+func (bw *blockWriter) compressedSize() int64 {
+	return bw.cxz.n
+}
+
+// uncompressedSize returns the amount of data written to the
+// blockWriter.
+func (bw *blockWriter) uncompressedSize() int64 {
+	return bw.n
+}
+
+// unpaddedSize returns the sum of the header length, the compressed
+// size of the block and the hash size.
+func (bw *blockWriter) unpaddedSize() int64 {
+	if bw.headerLen <= 0 {
+		panic("xz: block header not written")
+	}
+	n := int64(bw.headerLen)
+	n += bw.compressedSize()
+	n += int64(bw.hash.Size())
+	return n
+}
+
+// record returns the record for the current block. Call Close before
+// calling this method.
+func (bw *blockWriter) record() record {
+	return record{bw.unpaddedSize(), bw.uncompressedSize()}
+}
+
+var errClosed = errors.New("xz: writer already closed")
+
+var errNoSpace = errors.New("xz: no space")
+
+// Write writes uncompressed data to the block writer.
+func (bw *blockWriter) Write(p []byte) (n int, err error) {
+	if bw.closed {
+		return 0, errClosed
+	}
+
+	t := bw.blockSize - bw.n
+	if int64(len(p)) > t {
+		err = errNoSpace
+		p = p[:t]
+	}
+
+	var werr error
+	n, werr = bw.mw.Write(p)
+	bw.n += int64(n)
+	if werr != nil {
+		return n, werr
+	}
+	return n, err
+}
+
+// Close closes the blockWriter. It writes out the block padding and the
+// checksum.
+func (bw *blockWriter) Close() error {
+	if bw.closed {
+		return errClosed
+	}
+	bw.closed = true
+	if err := bw.w.Close(); err != nil {
+		return err
+	}
+	s := bw.hash.Size()
+	k := padLen(bw.cxz.n)
+	p := make([]byte, k+s)
+	bw.hash.Sum(p[k:k])
+	if _, err := bw.cxz.w.Write(p); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go
new file mode 100644
index 00000000000..4fce92add14
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go
@@ -0,0 +1,89 @@
+package cty
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type capsuleType struct {
+	typeImplSigil
+	Name   string
+	GoType reflect.Type
+}
+
+func (t *capsuleType) Equals(other Type) bool {
+	if otherP, ok := other.typeImpl.(*capsuleType); ok {
+		// capsule types compare by pointer identity
+		return otherP == t
+	}
+	return false
+}
+
+func (t *capsuleType) FriendlyName() string {
+	return t.Name
+}
+
+func (t *capsuleType) GoString() string {
+	// To get a useful representation of our native type requires some
+	// shenanigans.
+	victimVal := reflect.Zero(t.GoType)
+	return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface())
+}
+
+// Capsule creates a new Capsule type.
+//
+// A Capsule type is a special type that can be used to transport arbitrary
+// Go native values of a given type through the cty type system. A language
+// that uses cty as its type system might, for example, provide functions
+// that return capsule-typed values and then other functions that operate
+// on those values.
+//
+// From cty's perspective, Capsule types have a few interesting characteristics,
+// described in the following paragraphs.
+//
+// Each capsule type has an associated Go native type that it is able to
+// transport. Capsule types compare by identity, so each call to the
+// Capsule function creates an entirely-distinct cty Type, even if two calls
+// use the same native type.
+//
+// Each capsule-typed value contains a pointer to a value of the given native
+// type. A capsule-typed value supports no operations except equality, and
+// equality is implemented by pointer identity of the encapsulated pointer.
+//
+// The given name is used as the new type's "friendly name". This can be any
+// string in principle, but will usually be a short, all-lowercase name aimed
+// at users of the embedding language (i.e. not mention Go-specific details)
+// and will ideally not create ambiguity with any predefined cty type.
+//
+// Capsule types are never introduced by any standard cty operation, so a
+// calling application opts in to including them within its own type system
+// by creating them and introducing them via its own functions. At that point,
+// the application is responsible for dealing with any capsule-typed values
+// that might be returned.
+func Capsule(name string, nativeType reflect.Type) Type {
+	return Type{
+		&capsuleType{
+			Name:   name,
+			GoType: nativeType,
+		},
+	}
+}
+
+// IsCapsuleType returns true if this type is a capsule type, as created
+// by cty.Capsule.
+func (t Type) IsCapsuleType() bool {
+	_, ok := t.typeImpl.(*capsuleType)
+	return ok
+}
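
// An illustrative sketch (not part of the upstream file) of how an
// application might use capsule types. It relies on cty.CapsuleVal and
// Value.EncapsulatedValue, which are exercised elsewhere in this vendor
// drop (see function/stdlib/bytes.go below); the handle type is hypothetical.
//
//	type handle struct{ fd int }
//
//	// Each call to Capsule makes a distinct type, so keep one instance.
//	var handleType = cty.Capsule("handle", reflect.TypeOf(handle{}))
//
//	func wrapHandle(h *handle) cty.Value {
//		return cty.CapsuleVal(handleType, h) // stores a *handle
//	}
//
//	func unwrapHandle(v cty.Value) *handle {
//		return v.EncapsulatedValue().(*handle) // valid only for handleType values
//	}
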
+
+// EncapsulatedType returns the encapsulated native type of a capsule type,
+// or panics if the receiver is not a Capsule type.
+//
+// Use IsCapsuleType to determine if this method is safe to call.
+func (t Type) EncapsulatedType() reflect.Type {
+	impl, ok := t.typeImpl.(*capsuleType)
+	if !ok {
+		panic("not a capsule type")
+	}
+	return impl.GoType
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/collection.go b/vendor/github.com/zclconf/go-cty/cty/collection.go
new file mode 100644
index 00000000000..ab3919b14b7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/collection.go
@@ -0,0 +1,34 @@
+package cty
+
+import (
+	"errors"
+)
+
+type collectionTypeImpl interface {
+	ElementType() Type
+}
+
+// IsCollectionType returns true if the given type supports the operations
+// that are defined for all collection types.
+func (t Type) IsCollectionType() bool {
+	_, ok := t.typeImpl.(collectionTypeImpl)
+	return ok
+}
+
+// ElementType returns the element type of the receiver if it is a collection
+// type, or panics if it is not. Use IsCollectionType first to test whether
+// this method will succeed.
+func (t Type) ElementType() Type {
+	if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
+		return ct.ElementType()
+	}
+	panic(errors.New("not a collection type"))
+}
+
+// ElementCallback is a callback type used for iterating over elements of
+// collections and attributes of objects.
+//
+// The types of key and value depend on what type is being iterated over.
+// Return true to stop iterating after the current element, or false to
+// continue iterating.
+type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
new file mode 100644
index 00000000000..d84f6ac1049
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
@@ -0,0 +1,165 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// compareTypes implements a preference order for unification.
+//
+// The result of this method is not useful for anything other than unification
+// preferences, since it assumes that the caller will verify that any suggested
+// conversion is actually possible and it is thus able to make certain
+// optimistic assumptions.
+func compareTypes(a cty.Type, b cty.Type) int {
+
+	// DynamicPseudoType always has lowest preference, because anything can
+	// convert to it (it acts as a placeholder for "any type") and we want
+	// to optimistically assume that any dynamics will converge on matching
+	// their neighbors.
+	if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType {
+		if a != cty.DynamicPseudoType {
+			return -1
+		}
+		if b != cty.DynamicPseudoType {
+			return 1
+		}
+		return 0
+	}
+
+	if a.IsPrimitiveType() && b.IsPrimitiveType() {
+		// String is a supertype of all primitive types, because we can
+		// represent all primitive values as specially-formatted strings.
+		if a == cty.String || b == cty.String {
+			if a != cty.String {
+				return 1
+			}
+			if b != cty.String {
+				return -1
+			}
+			return 0
+		}
+	}
+
+	if a.IsListType() && b.IsListType() {
+		return compareTypes(a.ElementType(), b.ElementType())
+	}
+	if a.IsSetType() && b.IsSetType() {
+		return compareTypes(a.ElementType(), b.ElementType())
+	}
+	if a.IsMapType() && b.IsMapType() {
+		return compareTypes(a.ElementType(), b.ElementType())
+	}
+
+	// From this point on we may have swapped the two items in order to
+	// simplify our cases. Therefore any non-zero return after this point
+	// must be multiplied by "swap" to potentially invert the return value
+	// if needed.
+ swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. + if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go new file mode 100644 index 00000000000..b064bfb7d77 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -0,0 +1,99 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. 
+	return func(in cty.Value, path cty.Path) (cty.Value, error) {
+		if !in.IsKnown() {
+			return cty.UnknownVal(out), nil
+		}
+		if in.IsNull() {
+			// We'll pass through nulls, albeit type converted, and let
+			// the caller deal with whatever handling they want to do in
+			// case null values are considered valid in some applications.
+			return cty.NullVal(out), nil
+		}
+
+		return conv(in, path)
+	}
+}
+
+func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion {
+	switch {
+
+	case out == cty.DynamicPseudoType:
+		// Conversion *to* DynamicPseudoType means that the caller wishes
+		// to allow any type in this position, so we'll produce a do-nothing
+		// conversion that just passes through the value as-is.
+		return dynamicPassthrough
+
+	case unsafe && in == cty.DynamicPseudoType:
+		// Conversion *from* DynamicPseudoType means that we have a value
+		// whose type isn't yet known during type checking. For these we will
+		// assume that conversion will succeed and deal with any errors that
+		// result (which is why we can only do this when "unsafe" is set).
+		return dynamicFixup(out)
+
+	case in.IsPrimitiveType() && out.IsPrimitiveType():
+		conv := primitiveConversionsSafe[in][out]
+		if conv != nil {
+			return conv
+		}
+		if unsafe {
+			return primitiveConversionsUnsafe[in][out]
+		}
+		return nil
+
+	case out.IsListType() && (in.IsListType() || in.IsSetType()):
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from a set (or a list)
+			// to a list with the same element type, so we don't need an
+			// element converter.
+			return conversionCollectionToList(outEty, nil)
+		}
+
+		convEty := getConversion(inEty, outEty, unsafe)
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToList(outEty, convEty)
+
+	case out.IsListType() && in.IsTupleType():
+		outEty := out.ElementType()
+		return conversionTupleToList(in, outEty, unsafe)
+
+	case out.IsMapType() && in.IsObjectType():
+		outEty := out.ElementType()
+		return conversionObjectToMap(in, outEty, unsafe)
+
+	default:
+		return nil
+
+	}
+}
+
+// retConversion wraps a conversion (internal type) so it can be returned
+// as a Conversion (public type).
+func retConversion(conv conversion) Conversion {
+	if conv == nil {
+		return nil
+	}
+
+	return func(in cty.Value) (cty.Value, error) {
+		return conv(in, cty.Path(nil))
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
new file mode 100644
index 00000000000..a6a4c35d777
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
@@ -0,0 +1,186 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// conversionCollectionToList returns a conversion that will apply the given
+// conversion to all of the elements of a collection (something that supports
+// ForEachElement and LengthInt) and then returns the result as a list.
+//
+// "conv" can be nil if the elements are expected to already be of the
+// correct type and just need to be re-wrapped into a list. (For example,
+// if we're converting from a set into a list of the same element type.)
+func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionTupleToList returns a conversion that will take a value of the +// given tuple type and return a list of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.ListValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + path = append(path, nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.ListVal(elems), nil + } +} + +// conversionObjectToMap returns a conversion that will take a value of the +// given object type and return a map of the given element type. +// +// Will panic if the given objectType isn't actually an object type. +func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion { + objectAtys := objectType.AttributeTypes() + + if len(objectAtys) == 0 { + // Empty object short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.MapValEmpty(mapEty), nil + } + } + + if mapEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. 
+ objectAtysList := make([]cty.Type, 0, len(objectAtys)) + for _, aty := range objectAtys { + objectAtysList = append(objectAtysList, aty) + } + mapEty, _ = unify(objectAtysList, unsafe) + if mapEty == cty.NilType { + return nil + } + } + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(objectAty, mapEty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems[name.AsString()] = val + } + + return cty.MapVal(elems), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go new file mode 100644 index 00000000000..4d19cf6c5cb --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go @@ -0,0 +1,33 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// dynamicFixup deals with just-in-time conversions of values that were +// input-typed as cty.DynamicPseudoType during analysis, ensuring that +// we end up with the desired output type once the value is known, or +// failing with an error if that is not possible. +// +// This is in the spirit of the cty philosophy of optimistically assuming that +// DynamicPseudoType values will become the intended value eventually, and +// dealing with any inconsistencies during final evaluation. +func dynamicFixup(wantType cty.Type) conversion { + return func(in cty.Value, path cty.Path) (cty.Value, error) { + ret, err := Convert(in, wantType) + if err != nil { + // Re-wrap this error so that the returned path is relative + // to the caller's original value, rather than relative to our + // conversion value here. + return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. 
+func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 00000000000..399ff9a596e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,50 @@ +package convert + +import ( + "math/big" + + "github.com/zclconf/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + f, _, err := (&big.Float{}).Parse(val.AsString(), 10) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return cty.NumberVal(f), nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + }, + }, +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go new file mode 100644 index 00000000000..2037299bab4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. +// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. +package convert diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go new file mode 100644 index 00000000000..55f44aeca04 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. 
+//
+// The source type for a conversion is always the source type given to
+// the function that returned the Conversion, but there is no way to recover
+// that from a Conversion value itself. If a Conversion is given a value
+// that is not of its expected type (with the exception of DynamicPseudoType,
+// which is always supported) then the function may panic or produce undefined
+// results.
+type Conversion func(in cty.Value) (out cty.Value, err error)
+
+// GetConversion returns a Conversion between the given in and out Types if
+// a safe one is available, or returns nil otherwise.
+func GetConversion(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, false))
+}
+
+// GetConversionUnsafe returns a Conversion between the given in and out Types
+// if either a safe or unsafe one is available, or returns nil otherwise.
+func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, true))
+}
+
+// Convert returns the result of converting the given value to the given type
+// if a safe or unsafe conversion is available, or returns an error if such a
+// conversion is impossible.
+//
+// This is a convenience wrapper around calling GetConversionUnsafe and then
+// immediately passing the given value to the resulting function.
+func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
+	if in.Type().Equals(want) {
+		return in, nil
+	}
+
+	conv := GetConversionUnsafe(in.Type(), want)
+	if conv == nil {
+		return cty.NilVal, fmt.Errorf("incorrect type; %s required", want.FriendlyName())
+	}
+	return conv(in)
+}
+
+// Unify attempts to find the most general type that can be converted from
+// all of the given types. If this is possible, that type is returned along
+// with a slice of necessary conversions for some of the given types.
+//
+// If no common supertype can be found, this function returns cty.NilType and
+// a nil slice.
+//
+// If a common supertype *can* be found, the returned slice will always be
+// non-nil and will contain a non-nil conversion for each given type that
+// needs to be converted, with indices corresponding to the input slice.
+// Any given type that does *not* need conversion (because it is already of
+// the appropriate type) will have a nil Conversion.
+//
+// cty.DynamicPseudoType is, as usual, a special case. If the given type list
+// contains a mixture of dynamic and non-dynamic types, the dynamic types are
+// disregarded for type selection and a conversion is returned for them that
+// will attempt a late conversion of the given value to the target type,
+// failing with a conversion error if the eventual concrete type is not
+// compatible. If *all* given types are DynamicPseudoType, or in the
+// degenerate case of an empty slice of types, the returned type is itself
+// cty.DynamicPseudoType and no conversions are attempted.
+func Unify(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, false)
+}
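
// An illustrative sketch of these entry points (not part of the upstream
// file), assuming the caller imports this package as "convert":
//
//	// Safe: number-to-string always succeeds for known, non-null values.
//	s, err := convert.Convert(cty.NumberIntVal(5), cty.String)
//	// s is cty.StringVal("5") and err is nil.
//
//	// Unsafe: string-to-number succeeds only for numeric strings.
//	if conv := convert.GetConversionUnsafe(cty.String, cty.Number); conv != nil {
//		n, err := conv(cty.StringVal("12")) // cty.NumberIntVal(12), nil
//		_, _ = n, err
//	}
//
//	// Unification prefers String, the supertype of the primitives.
//	ty, convs := convert.Unify([]cty.Type{cty.String, cty.Number})
//	// ty is cty.String; convs[0] is nil, convs[1] converts Number to String.
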
+
+// UnifyUnsafe is the same as Unify except that it may return unsafe
+// conversions in situations where a safe conversion isn't also available.
+func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, true)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
new file mode 100644
index 00000000000..b7769106d11
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
@@ -0,0 +1,69 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// sortTypes produces an ordering of the given types that serves as a
+// preference order for the result of unification of the given types.
+// The return value is a slice of indices into the given slice, and will
+// thus always be the same length as the given slice.
+//
+// The goal is that the most general of the given types will appear first
+// in the ordering. If there are uncomparable pairs of types in the list
+// then they will appear in an undefined order, and the unification pass
+// will presumably then fail.
+func sortTypes(tys []cty.Type) []int {
+	l := len(tys)
+
+	// First we build a graph whose edges represent "more general than",
+	// which we will then do a topological sort of.
+	edges := make([][]int, l)
+	for i := 0; i < (l - 1); i++ {
+		for j := i + 1; j < l; j++ {
+			cmp := compareTypes(tys[i], tys[j])
+			switch {
+			case cmp < 0:
+				edges[i] = append(edges[i], j)
+			case cmp > 0:
+				edges[j] = append(edges[j], i)
+			}
+		}
+	}
+
+	// Compute the in-degree of each node
+	inDegree := make([]int, l)
+	for _, outs := range edges {
+		for _, j := range outs {
+			inDegree[j]++
+		}
+	}
+
+	// The array backing our result will double as our queue for visiting
+	// the nodes, with the queue slice moving along this array until it
+	// is empty and positioned at the end of the array. Thus our visiting
+	// order is also our result order.
+	result := make([]int, l)
+	queue := result[0:0]
+
+	// Initialize the queue with any item of in-degree 0, preserving
+	// their relative order.
+	for i, n := range inDegree {
+		if n == 0 {
+			queue = append(queue, i)
+		}
+	}
+
+	for len(queue) != 0 {
+		i := queue[0]
+		queue = queue[1:]
+		for _, j := range edges[i] {
+			inDegree[j]--
+			if inDegree[j] == 0 {
+				queue = append(queue, j)
+			}
+		}
+	}
+
+	return result
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go
new file mode 100644
index 00000000000..bd6736b4dc2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go
@@ -0,0 +1,66 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// The current unify implementation is somewhat inefficient, but we accept this
+// under the assumption that it will generally be used with small numbers of
+// types and with types of reasonable complexity. However, it does have a
+// "happy path" where all of the given types are equal.
+//
+// This function is likely to have poor performance in cases where any given
+// types are very complex (lots of deeply-nested structures) or if the list
+// of types itself is very large. In particular, it will walk the nested type
+// structure under the given types several times, especially when given a
+// list of types for which unification is not possible, since each permutation
+// will be tried to determine that result.
+func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. + conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // TODO: For structural types, try to invent a new type that they + // can all be unified to, by unifying their respective attributes. + + // If we fall out here, no unification is possible + return cty.NilType, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/doc.go b/vendor/github.com/zclconf/go-cty/cty/doc.go new file mode 100644 index 00000000000..d31f0547bf4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. +// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. +package cty diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go new file mode 100644 index 00000000000..0bf84c774a9 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go @@ -0,0 +1,191 @@ +package cty + +import ( + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... 
+// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. + atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git 
a/vendor/github.com/zclconf/go-cty/cty/error.go b/vendor/github.com/zclconf/go-cty/cty/error.go
new file mode 100644
index 00000000000..dd139f72499
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/error.go
@@ -0,0 +1,55 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// PathError is a specialization of error that represents where in a
+// potentially-deep data structure an error occurred, using a Path.
+type PathError struct {
+	error
+	Path Path
+}
+
+func errorf(path Path, f string, args ...interface{}) error {
+	// We need to copy the Path because often our caller builds it by
+	// continually mutating the same underlying buffer.
+	sPath := make(Path, len(path))
+	copy(sPath, path)
+	return PathError{
+		error: fmt.Errorf(f, args...),
+		Path:  sPath,
+	}
+}
+
+// NewErrorf creates a new PathError for the current path by passing the
+// given format and arguments to fmt.Errorf and then wrapping the result
+// similarly to NewError.
+func (p Path) NewErrorf(f string, args ...interface{}) error {
+	return errorf(p, f, args...)
+}
+
+// NewError creates a new PathError for the current path, wrapping the given
+// error.
+func (p Path) NewError(err error) error {
+	// If we're being asked to wrap an existing PathError then our new
+	// PathError will be the concatenation of the two paths, ensuring
+	// that we still get a single flat PathError that's thus easier for
+	// callers to deal with.
+	perr, wrappingPath := err.(PathError)
+	pathLen := len(p)
+	if wrappingPath {
+		pathLen = pathLen + len(perr.Path)
+	}
+
+	sPath := make(Path, pathLen)
+	copy(sPath, p)
+	if wrappingPath {
+		copy(sPath[len(p):], perr.Path)
+	}
+
+	return PathError{
+		error: err,
+		Path:  sPath,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
new file mode 100644
index 00000000000..bfd30157e68
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Parameter represents a parameter to a function.
+type Parameter struct {
+	// Name is an optional name for the argument. This package ignores this
+	// value, but callers may use it for documentation, etc.
+	Name string
+
+	// A type that any argument for this parameter must conform to.
+	// cty.DynamicPseudoType can be used, either at top-level or nested
+	// in a parameterized type, to indicate that any type should be
+	// permitted, to allow the definition of type-generic functions.
+	Type cty.Type
+
+	// If AllowNull is set then null values may be passed into this
+	// argument's slot in both the type-check function and the implementation
+	// function. If not set, such values are rejected by the built-in
+	// checking rules.
+	AllowNull bool
+
+	// If AllowUnknown is set then unknown values may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// unknown values will cause the function to immediately return
+	// an unknown value without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	AllowUnknown bool
+
+	// If AllowDynamicType is set then DynamicVal may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// dynamic values will cause the function to immediately return
+	// DynamicVal without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	//
+	// Note that DynamicVal is also unknown, so in order to receive dynamic
+	// *values* it is also necessary to set AllowUnknown.
+	//
+	// However, it is valid to set AllowDynamicType without AllowUnknown, in
+	// which case a dynamic value may be passed to the type checking function
+	// but will not make it to the *implementation* function. Instead, an
+	// unknown value of the type returned by the type-check function will be
+	// returned. This is suggested for functions that have a static return
+	// type since it allows the return value to be typed even if the input
+	// values are not, thus improving the type-check accuracy of derived
+	// values.
+	AllowDynamicType bool
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
new file mode 100644
index 00000000000..ac1dd8ce746
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
@@ -0,0 +1,6 @@
+// Package function builds on the functionality of cty by modeling functions
+// that operate on cty Values.
+//
+// Functions are, at their core, Go anonymous functions. However, this package
+// wraps them with utility functions for parameter type checking, etc.
+package function
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/error.go b/vendor/github.com/zclconf/go-cty/cty/function/error.go
new file mode 100644
index 00000000000..2b56779986c
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/error.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"fmt"
+	"runtime/debug"
+)
+
+// ArgError represents an error with one of the arguments in a call. The
+// attribute Index represents the zero-based index of the argument in question.
+//
+// Its error *may* be a cty.PathError, in which case the error actually
+// pertains to a nested value within the data structure passed as the argument.
+type ArgError struct {
+	error
+	Index int
+}
+
+// NewArgErrorf returns an ArgError for the argument at the given index,
+// with the message built from the given format and arguments.
+func NewArgErrorf(i int, f string, args ...interface{}) error {
+	return ArgError{
+		error: fmt.Errorf(f, args...),
+		Index: i,
+	}
+}
+
+// NewArgError returns an ArgError wrapping the given error for the argument
+// at the given index.
+func NewArgError(i int, err error) error {
+	return ArgError{
+		error: err,
+		Index: i,
+	}
+}
+
+// PanicError indicates that a panic occurred while executing either a
+// function's type or implementation function. This is captured and wrapped
+// into a normal error so that callers (expected to be language runtimes)
+// are freed from having to deal with panics in buggy functions.
+type PanicError struct {
+	Value interface{}
+	Stack []byte
+}
+
+func errorForPanic(val interface{}) error {
+	return PanicError{
+		Value: val,
+		Stack: debug.Stack(),
+	}
+}
+
+func (e PanicError) Error() string {
+	return fmt.Sprintf("panic in function implementation: %s\n%s", e.Value, e.Stack)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go
new file mode 100644
index 00000000000..162f7bfcdee
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go
@@ -0,0 +1,291 @@
+package function
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Function represents a function. This is the main type in this package.
+type Function struct {
+	spec *Spec
+}
+
+// Spec is the specification of a function, used to instantiate
+// a new Function.
+type Spec struct {
+	// Params is a description of the positional parameters for the function.
+ // The standard checking logic rejects any calls that do not provide + // arguments conforming to this definition, freeing the function + // implementer from dealing with such inconsistencies. + Params []Parameter + + // VarParam is an optional specification of additional "varargs" the + // function accepts. If this is non-nil then callers may provide an + // arbitrary number of additional arguments (after those matching with + // the fixed parameters in Params) that conform to the given specification, + // which will appear as additional values in the slices of values + // provided to the type and implementation functions. + VarParam *Parameter + + // Type is the TypeFunc that decides the return type of the function + // given its arguments, which may be Unknown. See the documentation + // of TypeFunc for more information. + // + // Use StaticReturnType if the function's return type does not vary + // depending on its arguments. + Type TypeFunc + + // Impl is the ImplFunc that implements the function's behavior. + // + // Functions are expected to behave as pure functions, and not create + // any visible side-effects. + // + // If a TypeFunc is also provided, the value returned from Impl *must* + // conform to the type it returns, or a call to the function will panic. + Impl ImplFunc +} + +// New creates a new function with the given specification. +// +// After passing a Spec to this function, the caller must no longer read from +// or mutate it. +func New(spec *Spec) Function { + f := Function{ + spec: spec, + } + return f +} + +// TypeFunc is a callback type for determining the return type of a function +// given its arguments. +// +// Any of the values passed to this function may be unknown, even if the +// parameters are not configured to accept unknowns. +// +// If any of the given values are *not* unknown, the TypeFunc may use the +// values for pre-validation and for choosing the return type. For example, +// a hypothetical JSON-unmarshalling function could return +// cty.DynamicPseudoType if the given JSON string is unknown, but return +// a concrete type based on the JSON structure if the JSON string is already +// known. +type TypeFunc func(args []cty.Value) (cty.Type, error) + +// ImplFunc is a callback type for the main implementation of a function. +// +// "args" are the values for the arguments, and this slice will always be at +// least as long as the argument definition slice for the function. +// +// "retType" is the type returned from the Type callback, included as a +// convenience to avoid the need to re-compute the return type for generic +// functions whose return type is a function of the arguments. +type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) + +// StaticReturnType returns a TypeFunc that always returns the given type. +// +// This is provided as a convenience for defining a function whose return +// type does not depend on the argument types. +func StaticReturnType(ty cty.Type) TypeFunc { + return func([]cty.Value) (cty.Type, error) { + return ty, nil + } +} + +// ReturnType returns the return type of a function given a set of candidate +// argument types, or returns an error if the given types are unacceptable. +// +// If the caller already knows values for at least some of the arguments +// it can be better to call ReturnTypeForValues, since certain functions may +// determine their return types from their values and return DynamicVal if +// the values are unknown. 
+func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) {
+	vals := make([]cty.Value, len(argTypes))
+	for i, ty := range argTypes {
+		vals[i] = cty.UnknownVal(ty)
+	}
+	return f.ReturnTypeForValues(vals)
+}
+
+// ReturnTypeForValues is similar to ReturnType but can be used if the caller
+// already knows the values of some or all of the arguments, in which case
+// the function may be able to determine a more definite result if its
+// return type depends on the argument *values*.
+//
+// For any arguments whose values are not known, pass an Unknown value of
+// the appropriate type.
+func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) {
+	var posArgs []cty.Value
+	var varArgs []cty.Value
+
+	if f.spec.VarParam == nil {
+		if len(args) != len(f.spec.Params) {
+			return cty.Type{}, fmt.Errorf(
+				"wrong number of arguments (%d required; %d given)",
+				len(f.spec.Params), len(args),
+			)
+		}
+
+		posArgs = args
+		varArgs = nil
+	} else {
+		if len(args) < len(f.spec.Params) {
+			return cty.Type{}, fmt.Errorf(
+				"wrong number of arguments (at least %d required; %d given)",
+				len(f.spec.Params), len(args),
+			)
+		}
+
+		posArgs = args[0:len(f.spec.Params)]
+		varArgs = args[len(f.spec.Params):]
+	}
+
+	for i, spec := range f.spec.Params {
+		val := posArgs[i]
+
+		if val.IsNull() && !spec.AllowNull {
+			return cty.Type{}, NewArgErrorf(i, "must not be null")
+		}
+
+		// AllowUnknown is ignored for type-checking, since we expect to be
+		// able to type check with unknown values. We *do* still need to deal
+		// with DynamicPseudoType here though, since the Type function might
+		// not be ready to deal with that.
+
+		if val.Type() == cty.DynamicPseudoType {
+			if !spec.AllowDynamicType {
+				return cty.DynamicPseudoType, nil
+			}
+		} else if errs := val.Type().TestConformance(spec.Type); errs != nil {
+			// For now we'll just return the first error in the set, since
+			// we don't have a good way to return the whole list here.
+			// Would be good to do something better at some point...
+			return cty.Type{}, NewArgError(i, errs[0])
+		}
+	}
+
+	if varArgs != nil {
+		spec := f.spec.VarParam
+		for i, val := range varArgs {
+			realI := i + len(posArgs)
+
+			if val.IsNull() && !spec.AllowNull {
+				return cty.Type{}, NewArgErrorf(realI, "must not be null")
+			}
+
+			if val.Type() == cty.DynamicPseudoType {
+				if !spec.AllowDynamicType {
+					return cty.DynamicPseudoType, nil
+				}
+			} else if errs := val.Type().TestConformance(spec.Type); errs != nil {
+				// For now we'll just return the first error in the set, since
+				// we don't have a good way to return the whole list here.
+				// Would be good to do something better at some point...
+				// (Use realI so the reported index accounts for the fixed
+				// positional parameters that precede the varargs.)
+				return cty.Type{}, NewArgError(realI, errs[0])
+			}
+		}
+	}
+
+	// Intercept any panics from the function and return them as normal errors,
+	// so a calling language runtime doesn't need to deal with panics.
+	defer func() {
+		if r := recover(); r != nil {
+			ty = cty.NilType
+			err = errorForPanic(r)
+		}
+	}()
+
+	return f.spec.Type(args)
+}
+
+// Call actually calls the function with the given arguments, which must
+// conform to the function's parameter specification or an error will be
+// returned.
+func (f Function) Call(args []cty.Value) (val cty.Value, err error) {
+	expectedType, err := f.ReturnTypeForValues(args)
+	if err != nil {
+		return cty.NilVal, err
+	}
+
+	// Type checking already dealt with most situations relating to our
+	// parameter specification, but we still need to deal with unknown
+	// values.
+	posArgs := args[:len(f.spec.Params)]
+	varArgs := args[len(f.spec.Params):]
+
+	for i, spec := range f.spec.Params {
+		val := posArgs[i]
+
+		if !val.IsKnown() && !spec.AllowUnknown {
+			return cty.UnknownVal(expectedType), nil
+		}
+	}
+
+	if f.spec.VarParam != nil {
+		spec := f.spec.VarParam
+		for _, val := range varArgs {
+			if !val.IsKnown() && !spec.AllowUnknown {
+				return cty.UnknownVal(expectedType), nil
+			}
+		}
+	}
+
+	var retVal cty.Value
+	{
+		// Intercept any panics from the function and return them as normal errors,
+		// so a calling language runtime doesn't need to deal with panics.
+		defer func() {
+			if r := recover(); r != nil {
+				val = cty.NilVal
+				err = errorForPanic(r)
+			}
+		}()
+
+		retVal, err = f.spec.Impl(args, expectedType)
+		if err != nil {
+			return cty.NilVal, err
+		}
+	}
+
+	// Returned value must conform to what the Type function expected, to
+	// protect callers from having to deal with inconsistencies.
+	if errs := retVal.Type().TestConformance(expectedType); errs != nil {
+		panic(fmt.Errorf(
+			"returned value %#v does not conform to expected return type %#v: %s",
+			retVal, expectedType, errs[0],
+		))
+	}
+
+	return retVal, nil
+}
+
+// ProxyFunc is the type returned by the method Function.Proxy.
+type ProxyFunc func(args ...cty.Value) (cty.Value, error)
+
+// Proxy returns a function that can be called with cty.Value arguments
+// to run the function. This is provided as a convenience for when using
+// a function directly within Go code.
+func (f Function) Proxy() ProxyFunc {
+	return func(args ...cty.Value) (cty.Value, error) {
+		return f.Call(args)
+	}
+}
+
+// Params returns information about the function's fixed positional parameters.
+// This does not include information about any variadic arguments accepted;
+// for that, call VarParam.
+func (f Function) Params() []Parameter {
+	new := make([]Parameter, len(f.spec.Params))
+	copy(new, f.spec.Params)
+	return new
+}
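
// An illustrative sketch (not part of the upstream file) of defining and
// calling a function with this package; upperFunc is hypothetical and the
// snippet assumes imports of "strings", cty and this package:
//
//	var upperFunc = function.New(&function.Spec{
//		Params: []function.Parameter{
//			{Name: "str", Type: cty.String},
//		},
//		Type: function.StaticReturnType(cty.String),
//		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
//			return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
//		},
//	})
//
//	v, err := upperFunc.Call([]cty.Value{cty.StringVal("hi")})
//	// v is cty.StringVal("HI"); a null or mistyped argument yields an
//	// ArgError instead.
//
//	up := upperFunc.Proxy() // plain Go-callable form of the same function
//	v, err = up(cty.StringVal("go"))
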
+
+// VarParam returns information about the variadic arguments the function
+// expects, or nil if the function is not variadic.
+func (f Function) VarParam() *Parameter {
+	if f.spec.VarParam == nil {
+		return nil
+	}
+
+	ret := *f.spec.VarParam
+	return &ret
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
new file mode 100644
index 00000000000..a473d0ec3fa
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go
@@ -0,0 +1,73 @@
+package stdlib
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// NotFunc is a Function that returns the logical complement of its
+// boolean argument.
+var NotFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "val",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].Not(), nil
+	},
+})
+
+// AndFunc is a Function that returns the logical AND of its two boolean
+// arguments.
+var AndFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].And(args[1]), nil
+	},
+})
+
+// OrFunc is a Function that returns the logical OR of its two boolean
+// arguments.
+var OrFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Bool,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].Or(args[1]), nil
+	},
+})
+
+// Not returns the logical complement of the given boolean value.
+func Not(num cty.Value) (cty.Value, error) {
+	return NotFunc.Call([]cty.Value{num})
+}
+
+// And returns true if and only if both of the given boolean values are true.
+func And(a, b cty.Value) (cty.Value, error) {
+	return AndFunc.Call([]cty.Value{a, b})
+}
+
+// Or returns true if either of the given boolean values is true.
+func Or(a, b cty.Value) (cty.Value, error) {
+	return OrFunc.Call([]cty.Value{a, b})
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
new file mode 100644
index 00000000000..a132e0cde54
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go
@@ -0,0 +1,112 @@
+package stdlib
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// Bytes is a capsule type that can be used with the binary functions to
+// support applications that need to support raw buffers in addition to
+// UTF-8 strings.
+var Bytes = cty.Capsule("bytes", reflect.TypeOf([]byte(nil)))
+
+// BytesVal creates a new Bytes value from the given buffer, which must be
+// non-nil or this function will panic.
+//
+// Once a byte slice has been wrapped in a Bytes capsule, its underlying array
+// must be considered immutable.
+func BytesVal(buf []byte) cty.Value {
+	if buf == nil {
+		panic("can't make Bytes value from nil slice")
+	}
+
+	return cty.CapsuleVal(Bytes, &buf)
+}
+
+// BytesLenFunc is a Function that returns the length of the buffer
+// encapsulated in a Bytes value.
+var BytesLenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + return cty.NumberIntVal(int64(len(*bufPtr))), nil + }, +}) + +// BytesSlice is a Function that returns a slice of the given Bytes value. +var BytesSliceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + { + Name: "offset", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "length", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(Bytes), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + + var offset, length int + + var err error + err = gocty.FromCtyValue(args[1], &offset) + if err != nil { + return cty.NilVal, err + } + err = gocty.FromCtyValue(args[2], &length) + if err != nil { + return cty.NilVal, err + } + + if offset < 0 || length < 0 { + return cty.NilVal, fmt.Errorf("offset and length must be non-negative") + } + + if offset > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d is greater than total buffer length %d", + offset, len(*bufPtr), + ) + } + + end := offset + length + + if end > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d + length %d is greater than total buffer length %d", + offset, length, len(*bufPtr), + ) + } + + return BytesVal((*bufPtr)[offset:end]), nil + }, +}) + +func BytesLen(buf cty.Value) (cty.Value, error) { + return BytesLenFunc.Call([]cty.Value{buf}) +} + +func BytesSlice(buf cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return BytesSliceFunc.Call([]cty.Value{buf, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go new file mode 100644 index 00000000000..967ba03c8b2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -0,0 +1,140 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +var HasIndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Bool, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].HasIndex(args[1]), nil + }, +}) + +var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + key := args[1] + keyTy := key.Type() + switch { + case collTy.IsTupleType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, 
fmt.Errorf("key for tuple must be number") + } + if !key.IsKnown() { + return cty.DynamicPseudoType, nil + } + var idx int + err := gocty.FromCtyValue(key, &idx) + if err != nil { + return cty.NilType, fmt.Errorf("invalid key for tuple: %s", err) + } + + etys := collTy.TupleElementTypes() + + if idx >= len(etys) || idx < 0 { + return cty.NilType, fmt.Errorf("key must be between 0 and %d inclusive", len(etys)) + } + + return etys[idx], nil + + case collTy.IsListType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for list must be number") + } + + return collTy.ElementType(), nil + + case collTy.IsMapType(): + if keyTy != cty.String && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for map must be string") + } + + return collTy.ElementType(), nil + + default: + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + has, err := HasIndex(args[0], args[1]) + if err != nil { + return cty.NilVal, err + } + if has.False() { // safe because collection and key are guaranteed known here + return cty.NilVal, fmt.Errorf("invalid index") + } + + return args[0].Index(args[1]), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Number, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Length(), nil + }, +}) + +// HasIndex determines whether the given collection can be indexed with the +// given key. +func HasIndex(collection cty.Value, key cty.Value) (cty.Value, error) { + return HasIndexFunc.Call([]cty.Value{collection, key}) +} + +// Index returns an element from the given collection using the given key, +// or returns an error if there is no element for the given key. +func Index(collection cty.Value, key cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{collection, key}) +} + +// Length returns the number of elements in the given collection. +func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go new file mode 100644 index 00000000000..cfb613e5a5d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go @@ -0,0 +1,13 @@ +// Package stdlib is a collection of cty functions that are expected to be +// generally useful, and are thus factored out into this shared library in +// the hope that cty-using applications will have consistent behavior when +// using these functions. +// +// See the parent package "function" for more information on the purpose +// and usage of cty functions. +// +// This package contains both Go functions, which provide convenient access +// to call the functions from Go code, and the Function objects themselves. +// The latter follow the naming scheme of appending "Func" to the end of +// the function name. 
+package stdlib diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go new file mode 100644 index 00000000000..6b31f266141 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -0,0 +1,107 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var EqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]), nil + }, +}) + +var NotEqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]).Not(), nil + }, +}) + +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, fmt.Errorf("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + + return convert.Convert(argVal, retType) + } + return cty.NilVal, fmt.Errorf("no non-null arguments") + }, +}) + +// Equal determines whether the two given values are equal, returning a +// bool value. +func Equal(a cty.Value, b cty.Value) (cty.Value, error) { + return EqualFunc.Call([]cty.Value{a, b}) +} + +// NotEqual is the opposite of Equal. +func NotEqual(a cty.Value, b cty.Value) (cty.Value, error) { + return NotEqualFunc.Call([]cty.Value{a, b}) +} + +// Coalesce returns the first of the given arguments that is not null. If +// all arguments are null, an error is produced. 
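+//
+// A usage sketch (illustrative only). The variadic arguments are unified
+// to a single type before the first non-null one is converted and returned:
+//
+//	v, _ := Coalesce(cty.NullVal(cty.String), cty.StringVal("fallback"))
+//	// v == cty.StringVal("fallback")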
+func Coalesce(vals ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(vals) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go new file mode 100644 index 00000000000..6288a1e7451 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -0,0 +1,65 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +var JSONEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + val := args[0] + buf, err := json.Marshal(val, val.Type()) + if err != nil { + return cty.NilVal, err + } + + return cty.StringVal(string(buf)), nil + }, +}) + +var JSONDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + buf := []byte(str.AsString()) + return json.ImpliedType(buf) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + buf := []byte(args[0].AsString()) + return json.Unmarshal(buf, retType) + }, +}) + +// JSONEncode returns a JSON serialization of the given value. +func JSONEncode(val cty.Value) (cty.Value, error) { + return JSONEncodeFunc.Call([]cty.Value{val}) +} + +// JSONDecode parses the given JSON string and, if it is valid, returns the +// value it represents. +// +// Note that applying JSONDecode to the result of JSONEncode may not produce +// an identically-typed result, since JSON encoding is lossy for cty Types. +// The resulting value will consist only of primitive types, object types, and +// tuple types. +func JSONDecode(str cty.Value) (cty.Value, error) { + return JSONDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go new file mode 100644 index 00000000000..bd9b2e51b10 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -0,0 +1,428 @@ +package stdlib + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var AbsoluteFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Absolute(), nil + }, +}) + +var AddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. 
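+		// For example, Add(cty.PositiveInfinity, cty.NegativeInfinity)
+		// would otherwise panic with big.ErrNaN; with this recovery the
+		// caller receives a normal error instead.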
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Add(args[1]), nil + }, +}) + +var SubtractFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Sub can panic if the input values are infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't subtract infinity from itself") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Subtract(args[1]), nil + }, +}) + +var MultiplyFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't multiply zero by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Multiply(args[1]), nil + }, +}) + +var DivideFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Quo can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't divide zero by zero or infinity by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Divide(args[1]), nil + }, +}) + +var ModuloFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. 
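+		// (Modulo is implemented via big.Float arithmetic too, so the
+		// panic intercepted here is likewise big.ErrNaN, raised for
+		// combinations such as zero and infinity that have no defined
+		// result.)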
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't use modulo with zero and infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Modulo(args[1]), nil + }, +}) + +var GreaterThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThan(args[1]), nil + }, +}) + +var GreaterThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThanOrEqualTo(args[1]), nil + }, +}) + +var LessThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThan(args[1]), nil + }, +}) + +var LessThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThanOrEqualTo(args[1]), nil + }, +}) + +var NegateFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Negate(), nil + }, +}) + +var MinFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + min := cty.PositiveInfinity + for _, num := range args { + if num.LessThan(min).True() { + min = num + } + } + + return min, nil + }, +}) + +var MaxFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + max := cty.NegativeInfinity + for _, num := range args { + if num.GreaterThan(max).True() { + max = num + } + } + + return max, nil + }, +}) + +var IntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, 
error) {
+		bf := args[0].AsBigFloat()
+		if bf.IsInt() {
+			return args[0], nil
+		}
+		bi, _ := bf.Int(nil)
+		bf = (&big.Float{}).SetInt(bi)
+		return cty.NumberVal(bf), nil
+	},
+})
+
+// Absolute returns the magnitude of the given number, without its sign.
+// That is, it turns negative values into positive values.
+func Absolute(num cty.Value) (cty.Value, error) {
+	return AbsoluteFunc.Call([]cty.Value{num})
+}
+
+// Add returns the sum of the two given numbers.
+func Add(a cty.Value, b cty.Value) (cty.Value, error) {
+	return AddFunc.Call([]cty.Value{a, b})
+}
+
+// Subtract returns the difference between the two given numbers.
+func Subtract(a cty.Value, b cty.Value) (cty.Value, error) {
+	return SubtractFunc.Call([]cty.Value{a, b})
+}
+
+// Multiply returns the product of the two given numbers.
+func Multiply(a cty.Value, b cty.Value) (cty.Value, error) {
+	return MultiplyFunc.Call([]cty.Value{a, b})
+}
+
+// Divide returns a divided by b, where both a and b are numbers.
+func Divide(a cty.Value, b cty.Value) (cty.Value, error) {
+	return DivideFunc.Call([]cty.Value{a, b})
+}
+
+// Negate returns the given number multiplied by -1.
+func Negate(num cty.Value) (cty.Value, error) {
+	return NegateFunc.Call([]cty.Value{num})
+}
+
+// LessThan returns true if a is less than b.
+func LessThan(a cty.Value, b cty.Value) (cty.Value, error) {
+	return LessThanFunc.Call([]cty.Value{a, b})
+}
+
+// LessThanOrEqualTo returns true if a is less than or equal to b.
+func LessThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) {
+	return LessThanOrEqualToFunc.Call([]cty.Value{a, b})
+}
+
+// GreaterThan returns true if a is greater than b.
+func GreaterThan(a cty.Value, b cty.Value) (cty.Value, error) {
+	return GreaterThanFunc.Call([]cty.Value{a, b})
+}
+
+// GreaterThanOrEqualTo returns true if a is greater than or equal to b.
+func GreaterThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) {
+	return GreaterThanOrEqualToFunc.Call([]cty.Value{a, b})
+}
+
+// Modulo returns the remainder of a divided by b under integer division,
+// where both a and b are numbers.
+func Modulo(a cty.Value, b cty.Value) (cty.Value, error) {
+	return ModuloFunc.Call([]cty.Value{a, b})
+}
+
+// Min returns the minimum number from the given numbers.
+func Min(numbers ...cty.Value) (cty.Value, error) {
+	return MinFunc.Call(numbers)
+}
+
+// Max returns the maximum number from the given numbers.
+func Max(numbers ...cty.Value) (cty.Value, error) {
+	return MaxFunc.Call(numbers)
+}
+
+// Int removes the fractional component of the given number returning an
+// integer representing the whole number component, rounding towards zero.
+// For example, -1.5 becomes -1.
+//
+// If an infinity is passed to Int, an error is returned.
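+//
+// For example (illustrative sketch):
+//
+//	v, _ := Int(cty.NumberFloatVal(-1.5))
+//	// v is the whole number -1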
+func Int(num cty.Value) (cty.Value, error) { + if num == cty.PositiveInfinity || num == cty.NegativeInfinity { + return cty.NilVal, fmt.Errorf("can't truncate infinity to an integer") + } + return IntFunc.Call([]cty.Value{num}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go new file mode 100644 index 00000000000..e2c77c5d35a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -0,0 +1,130 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var ConcatFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "seqs", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, fmt.Errorf("at least one argument is required") + } + + if args[0].Type().IsListType() { + // Possibly we're going to return a list, if all of our other + // args are also lists and we can find a common element type. + tys := make([]cty.Type, len(args)) + for i, val := range args { + ty := val.Type() + if !ty.IsListType() { + tys = nil + break + } + tys[i] = ty + } + + if tys != nil { + commonType, _ := convert.UnifyUnsafe(tys) + if commonType != cty.NilType { + return commonType, nil + } + } + } + + etys := make([]cty.Type, 0, len(args)) + for i, val := range args { + ety := val.Type() + switch { + case ety.IsTupleType(): + etys = append(etys, ety.TupleElementTypes()...) + case ety.IsListType(): + if !val.IsKnown() { + // We need to know the list to count its elements to + // build our tuple type, so any concat of an unknown + // list can't be typed yet. + return cty.DynamicPseudoType, nil + } + + l := val.LengthInt() + subEty := ety.ElementType() + for j := 0; j < l; j++ { + etys = append(etys, subEty) + } + default: + return cty.NilType, function.NewArgErrorf( + i, "all arguments must be lists or tuples; got %s", + ety.FriendlyName(), + ) + } + } + return cty.Tuple(etys), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + switch { + case retType.IsListType(): + // If retType is a list type then we know that all of the + // given values will be lists and that they will either be of + // retType or of something we can convert to retType. + vals := make([]cty.Value, 0, len(args)) + for i, list := range args { + list, err = convert.Convert(list, retType) + if err != nil { + // Conversion might fail because we used UnifyUnsafe + // to choose our return type. + return cty.NilVal, function.NewArgError(i, err) + } + + it := list.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, v) + } + } + if len(vals) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + return cty.ListVal(vals), nil + case retType.IsTupleType(): + // If retType is a tuple type then we could have a mixture of + // lists and tuples but we know they all have known values + // (because our params don't AllowUnknown) and we know that + // concatenating them all together will produce a tuple of + // retType because of the work we did in the Type function above. + vals := make([]cty.Value, 0, len(args)) + + for _, seq := range args { + // Both lists and tuples support ElementIterator, so this is easy. 
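+				// For example (illustrative), concatenating
+				// cty.ListVal([]cty.Value{cty.StringVal("a")}) with
+				// cty.TupleVal([]cty.Value{cty.True}) lands here and
+				// produces a tuple of ("a", true).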
+ it := seq.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, v) + } + } + + return cty.TupleVal(vals), nil + default: + // should never happen if Type is working correctly above + panic("unsupported return type") + } + }, +}) + +// Concat takes one or more sequences (lists or tuples) and returns the single +// sequence that results from concatenating them together in order. +// +// If all of the given sequences are lists of the same element type then the +// result is a list of that type. Otherwise, the result is a of a tuple type +// constructed from the given sequence types. +func Concat(seqs ...cty.Value) (cty.Value, error) { + return ConcatFunc.Call(seqs) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go new file mode 100644 index 00000000000..d7c89fa8296 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -0,0 +1,234 @@ +package stdlib + +import ( + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "github.com/apparentlymart/go-textseg/textseg" +) + +var UpperFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToUpper(in) + return cty.StringVal(out), nil + }, +}) + +var LowerFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToLower(in) + return cty.StringVal(out), nil + }, +}) + +var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := []byte(args[0].AsString()) + out := make([]byte, len(in)) + pos := len(out) + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + cluster := in[i : i+d] + pos -= len(cluster) + copy(out[pos:], cluster) + i += d + } + + return cty.StringVal(string(out)), nil + }, +}) + +var StrlenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + l := 0 + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + l++ + i += d + } + + return cty.NumberIntVal(int64(l)), nil + }, +}) + +var SubstrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + { + Name: "offset", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "length", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := []byte(args[0].AsString()) + var offset, length int + + var err error + err = 
gocty.FromCtyValue(args[1], &offset) + if err != nil { + return cty.NilVal, err + } + err = gocty.FromCtyValue(args[2], &length) + if err != nil { + return cty.NilVal, err + } + + if offset < 0 { + totalLenNum, err := Strlen(args[0]) + if err != nil { + // should never happen + panic("Stdlen returned an error") + } + + var totalLen int + err = gocty.FromCtyValue(totalLenNum, &totalLen) + if err != nil { + // should never happen + panic("Stdlen returned a non-int number") + } + + offset += totalLen + } + + sub := in + pos := 0 + var i int + + // First we'll seek forward to our offset + if offset > 0 { + for i = 0; i < len(sub); { + d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true) + i += d + pos++ + if pos == offset { + break + } + if i >= len(in) { + return cty.StringVal(""), nil + } + } + + sub = sub[i:] + } + + if length < 0 { + // Taking the remainder of the string is a fast path since + // we can just return the rest of the buffer verbatim. + return cty.StringVal(string(sub)), nil + } + + // Otherwise we need to start seeking forward again until we + // reach the length we want. + pos = 0 + for i = 0; i < len(sub); { + d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true) + i += d + pos++ + if pos == length { + break + } + } + + sub = sub[:i] + + return cty.StringVal(string(sub)), nil + }, +}) + +// Upper is a Function that converts a given string to uppercase. +func Upper(str cty.Value) (cty.Value, error) { + return UpperFunc.Call([]cty.Value{str}) +} + +// Lower is a Function that converts a given string to lowercase. +func Lower(str cty.Value) (cty.Value, error) { + return LowerFunc.Call([]cty.Value{str}) +} + +// Reverse is a Function that reverses the order of the characters in the +// given string. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +func Reverse(str cty.Value) (cty.Value, error) { + return ReverseFunc.Call([]cty.Value{str}) +} + +// Strlen is a Function that returns the length of the given string in +// characters. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +func Strlen(str cty.Value) (cty.Value, error) { + return StrlenFunc.Call([]cty.Value{str}) +} + +// Substr is a Function that extracts a sequence of characters from another +// string and creates a new string. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +// +// The "offset" index may be negative, in which case it is relative to the +// end of the given string. +// +// The "length" may be -1, in which case the remainder of the string after +// the given offset will be returned. +func Substr(str cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return SubstrFunc.Call([]cty.Value{str, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go new file mode 100644 index 00000000000..3d731993bc2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gob.go @@ -0,0 +1,125 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. 
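+//
+// A round-trip sketch (illustrative only, using a non-capsule value):
+//
+//	var buf bytes.Buffer
+//	_ = gob.NewEncoder(&buf).Encode(cty.StringVal("hi"))
+//	var out cty.Value
+//	_ = gob.NewDecoder(&buf).Decode(&out) // out is cty.StringVal("hi")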
+//
+// Currently it is not possible to represent values of capsule types in gob,
+// because the types themselves cannot be represented.
+func (val Value) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gv := gobValue{
+		Version: 0,
+		Ty:      val.ty,
+		V:       val.v,
+	}
+
+	err := enc.Encode(gv)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Value: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// inverts the operation performed by GobEncode. See the documentation of
+// GobEncode for considerations when using cty.Value instances with gob.
+func (val *Value) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gv gobValue
+	err := dec.Decode(&gv)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Value: %s", err)
+	}
+	if gv.Version != 0 {
+		return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
+	}
+
+	// big.Float seems to, for some reason, lose its "pointerness" when we
+	// round-trip it, so we'll fix that here.
+	if bf, ok := gv.V.(big.Float); ok {
+		gv.V = &bf
+	}
+
+	val.ty = gv.Ty
+	val.v = gv.V
+
+	return nil
+}
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Types to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent capsule types in gob.
+func (t Type) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gt := gobType{
+		Version: 0,
+		Impl:    t.typeImpl,
+	}
+
+	err := enc.Encode(gt)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Type: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// reverses the encoding performed by GobEncode to allow types to be recovered
+// from gob buffers.
+func (t *Type) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gt gobType
+	err := dec.Decode(&gt)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Type: %s", err)
+	}
+	if gt.Version != 0 {
+		return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
+	}
+
+	t.typeImpl = gt.Impl
+
+	return nil
+}
+
+// Capsule types cannot currently be gob-encoded, because they rely on pointer
+// equality and we have no way to recover the original pointer on decode.
+func (t *capsuleType) GobEncode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName())
+}
+
+func (t *capsuleType) GobDecode(buf []byte) error {
+	return fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName())
+}
+
+type gobValue struct {
+	Version int
+	Ty      Type
+	V       interface{}
+}
+
+type gobType struct {
+	Version int
+	Impl    typeImpl
+}
+
+type gobCapsuleTypeImpl struct {
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
new file mode 100644
index 00000000000..a5177d22b27
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
@@ -0,0 +1,7 @@
+// Package gocty deals with converting between cty Values and native go
+// values.
+//
+// It operates under a similar principle to the encoding/json and
+// encoding/xml packages in the standard library, using reflection to
+// populate native Go data structures from cty values and vice-versa.
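+//
+// A round-trip sketch (illustrative, not part of the upstream source):
+//
+//	type point struct {
+//		X int `cty:"x"`
+//		Y int `cty:"y"`
+//	}
+//
+//	ty := cty.Object(map[string]cty.Type{"x": cty.Number, "y": cty.Number})
+//	v, _ := gocty.ToCtyValue(point{X: 1, Y: 2}, ty)
+//
+//	var p point
+//	_ = gocty.FromCtyValue(v, &p) // p == point{X: 1, Y: 2}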
+package gocty diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go new file mode 100644 index 00000000000..94ffd2fb74a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go @@ -0,0 +1,43 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/set" +) + +var valueType = reflect.TypeOf(cty.Value{}) +var typeType = reflect.TypeOf(cty.Type{}) + +var setType = reflect.TypeOf(set.Set{}) + +var bigFloatType = reflect.TypeOf(big.Float{}) +var bigIntType = reflect.TypeOf(big.Int{}) + +var emptyInterfaceType = reflect.TypeOf(interface{}(nil)) + +var stringType = reflect.TypeOf("") + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. +func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go new file mode 100644 index 00000000000..642501b25b6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go @@ -0,0 +1,528 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. +func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. 
+ path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. 
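+		// (The placeholder appended here is overwritten with the current
+		// IndexStep on each iteration, so we allocate at most one extra
+		// path element per level of recursion rather than one per element.)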
+ path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, 
path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. 
+ path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/out.go b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go new file mode 100644 index 00000000000..99b65a767ce --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go @@ -0,0 +1,705 @@ +package gocty + +import ( + "math/big" + "reflect" + + "math" + + "github.com/zclconf/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + if val.True() { + target.Set(reflect.ValueOf(true)) + } else { + target.Set(reflect.ValueOf(false)) + } + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.Set(reflect.ValueOf(iv).Convert(target.Type())) + + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.Set(reflect.ValueOf(iv).Convert(target.Type())) + + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32: + fv, accuracy := bf.Float32() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(float64(fv), 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat32, math.MaxFloat32) + } + } + target.Set(reflect.ValueOf(fv)) + return nil + case reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. 
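+			// (For example, a number like 1e+400 is outside the float64
+			// range and would round to +Inf, which we report as an error
+			// rather than storing silently.)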
+ if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.Set(reflect.ValueOf(fv)) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.AssignableTo(target.Type()): + // Easy! + target.Set(reflect.ValueOf(bf).Elem()) + return nil + + case bigIntType.AssignableTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem()) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.String: + target.Set(reflect.ValueOf(val.AsString())) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var 
err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. 
This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
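+//
+// For example, decoding cty.True into a target Go string fails here with
+// "string value is required", deliberately saying nothing about Go kinds.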
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go new file mode 100644 index 00000000000..ce4c8f1e9f0 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
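+//
+// A usage sketch (hypothetical struct; cty field tags are required for
+// structs to map to object types):
+//
+//     type Point struct {
+//         X int `cty:"x"`
+//         Y int `cty:"y"`
+//     }
+//     ty, err := ImpliedType(Point{})
+//     // on success, ty is cty.Object(map[string]cty.Type{
+//     //     "x": cty.Number, "y": cty.Number,
+//     // })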
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go new file mode 100644 index 00000000000..1b88e9fa082 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
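+//
+// A sketch of the intended use: typeCheck(Number, Bool, NumberIntVal(1),
+// UnknownVal(Number)) returns a short-circuit unknown Bool, while including
+// a String value instead would produce a "type mismatch" error.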
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) {
+	hasDynamic := false
+	hasUnknown := false
+
+	for i, val := range values {
+		if val.ty == DynamicPseudoType {
+			hasDynamic = true
+			continue
+		}
+
+		if !val.Type().Equals(required) {
+			return nil, fmt.Errorf(
+				"type mismatch: want %s but value %d is %s",
+				required.FriendlyName(),
+				i, val.ty.FriendlyName(),
+			)
+		}
+
+		if val.v == unknown {
+			hasUnknown = true
+		}
+	}
+
+	if hasDynamic {
+		return &DynamicVal, nil
+	}
+
+	if hasUnknown {
+		ret := UnknownVal(ret)
+		return &ret, nil
+	}
+
+	return nil, nil
+}
+
+// mustTypeCheck is a wrapper around typeCheck that immediately panics if
+// any error is returned.
+func mustTypeCheck(required Type, ret Type, values ...Value) *Value {
+	shortCircuit, err := typeCheck(required, ret, values...)
+	if err != nil {
+		panic(err)
+	}
+	return shortCircuit
+}
+
+// forceShortCircuitType takes the return value from mustTypeCheck and
+// replaces it with an unknown of the given type if the original value was
+// DynamicVal.
+//
+// This is useful for operations that are specified to always return a
+// particular type, since then a dynamic result can safely be "upgraded" to
+// a strongly-typed unknown, which then allows subsequent operations to
+// be actually type-checked.
+//
+// It is safe to use this only if the operation in question is defined as
+// returning either a value of the given type or panicking, since we know
+// then that subsequent operations won't run if the operation panics.
+//
+// If the given short-circuit value is *not* DynamicVal then it must be
+// of the given type, or this function will panic.
+func forceShortCircuitType(shortCircuit *Value, ty Type) *Value {
+	if shortCircuit == nil {
+		return nil
+	}
+
+	if shortCircuit.ty == DynamicPseudoType {
+		ret := UnknownVal(ty)
+		return &ret
+	}
+
+	if !shortCircuit.ty.Equals(ty) {
+		panic("forceShortCircuitType got value of wrong type")
+	}
+
+	return shortCircuit
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json.go b/vendor/github.com/zclconf/go-cty/cty/json.go
new file mode 100644
index 00000000000..c421a62ed94
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json.go
@@ -0,0 +1,176 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+// MarshalJSON is an implementation of json.Marshaler that allows Type
+// instances to be serialized as JSON.
+//
+// All standard types can be serialized, but capsule types cannot since there
+// is no way to automatically recover the original pointer and capsule types
+// compare by equality.
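+//
+// A sketch of the serialized forms (inferred from the cases below):
+// cty.String encodes as "string", cty.List(cty.String) encodes as
+// ["list","string"], and an object type encodes as
+// ["object",{...attribute types...}].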
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/doc.go 
b/vendor/github.com/zclconf/go-cty/cty/json/doc.go new file mode 100644 index 00000000000..8916513d673 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go new file mode 100644 index 00000000000..f7bea1a2ff6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go @@ -0,0 +1,189 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. 
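+	// Everything below dispatches on the target type: primitives are written
+	// directly, while collection and structural types recurse via marshal
+	// with an extended path for error reporting.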
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
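+// For example (shape inferred from the code below), a string value conformed
+// to cty.DynamicPseudoType is serialized as:
+//
+//     {"value":"hello","type":"string"}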
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error {
+	typeJSON, err := MarshalType(val.Type())
+	if err != nil {
+		return path.NewErrorf("failed to serialize type: %s", err)
+	}
+	b.WriteString(`{"value":`)
+	err = marshal(val, val.Type(), path, b)
+	if err != nil {
+		return path.NewErrorf("failed to serialize value: %s", err)
+	}
+	b.WriteString(`,"type":`)
+	b.Write(typeJSON)
+	b.WriteRune('}')
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/simple.go b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
new file mode 100644
index 00000000000..507c9cc2c60
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
@@ -0,0 +1,41 @@
+package json
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// SimpleJSONValue is a wrapper around cty.Value that adds implementations of
+// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic
+// encoding and decoding of values.
+//
+// The functions Marshal and Unmarshal in this package both take extra type
+// information to inform the encoding and decoding process so that all of the
+// cty types can be represented even though JSON's type system is a subset.
+//
+// SimpleJSONValue instead takes the approach of discarding the value's type
+// information and then deriving a new type from the stored structure when
+// decoding. This results in the same data being returned but not necessarily
+// with exactly the same type.
+//
+// For information on how types are inferred when decoding, see the
+// documentation of the function ImpliedType.
+type SimpleJSONValue struct {
+	cty.Value
+}
+
+// MarshalJSON is an implementation of json.Marshaler. See the documentation
+// of SimpleJSONValue for more information.
+func (v SimpleJSONValue) MarshalJSON() ([]byte, error) {
+	return Marshal(v.Value, v.Type())
+}
+
+// UnmarshalJSON is an implementation of json.Unmarshaler. See the
+// documentation of SimpleJSONValue for more information.
+func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error {
+	t, err := ImpliedType(buf)
+	if err != nil {
+		return err
+	}
+	v.Value, err = Unmarshal(buf, t)
+	return err
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type.go b/vendor/github.com/zclconf/go-cty/cty/json/type.go
new file mode 100644
index 00000000000..9131c6c7743
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type.go
@@ -0,0 +1,23 @@
+package json
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MarshalType returns a JSON serialization of the given type.
+//
+// This is just a thin wrapper around t.MarshalJSON, for symmetry with
+// UnmarshalType.
+func MarshalType(t cty.Type) ([]byte, error) {
+	return t.MarshalJSON()
+}
+
+// UnmarshalType decodes a JSON serialization of the given type as produced
+// by either Type.MarshalJSON or MarshalType.
+//
+// This is a convenience wrapper around Type.UnmarshalJSON.
+func UnmarshalType(buf []byte) (cty.Type, error) {
+	var t cty.Type
+	err := t.UnmarshalJSON(buf)
+	return t, err
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
new file mode 100644
index 00000000000..1a973066e4a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
@@ -0,0 +1,171 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ImpliedType returns the cty Type implied by the structure of the given
+// JSON-compliant buffer. This function implements the default type mapping
+// behavior used when decoding arbitrary JSON without explicit cty Type
+// information.
+//
+// The rules are as follows:
+//
+// JSON strings, numbers and bools map to their equivalent primitive type in
+// cty.
+//
+// JSON objects map to cty object types, with the attributes defined by the
+// object keys and the types of their values.
+//
+// JSON arrays map to cty tuple types, with the elements defined by the
+// types of the array members.
+//
+// Any nulls are typed as DynamicPseudoType, so callers of this function
+// must be prepared to deal with this. Callers that do not wish to deal with
+// dynamic typing should not use this function and should instead describe
+// their required types explicitly with a cty.Type instance when decoding.
+//
+// Any JSON syntax errors will be returned as an error, and the type will
+// be the invalid value cty.NilType.
+func ImpliedType(buf []byte) (cty.Type, error) {
+	r := bytes.NewReader(buf)
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+
+	ty, err := impliedType(dec)
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	if dec.More() {
+		return cty.NilType, fmt.Errorf("extraneous data after JSON object")
+	}
+
+	return ty, nil
+}
+
+func impliedType(dec *json.Decoder) (cty.Type, error) {
+	tok, err := dec.Token()
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	return impliedTypeForTok(tok, dec)
+}
+
+func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) {
+	if tok == nil {
+		return cty.DynamicPseudoType, nil
+	}
+
+	switch ttok := tok.(type) {
+	case bool:
+		return cty.Bool, nil
+
+	case json.Number:
+		return cty.Number, nil
+
+	case string:
+		return cty.String, nil
+
+	case json.Delim:
+
+		switch rune(ttok) {
+		case '{':
+			return impliedObjectType(dec)
+		case '[':
+			return impliedTupleType(dec)
+		default:
+			return cty.NilType, fmt.Errorf("unexpected token %q", ttok)
+		}
+
+	default:
+		return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok)
+	}
+}
+
+func impliedObjectType(dec *json.Decoder) (cty.Type, error) {
+	// By the time we get in here, we've already consumed the { delimiter
+	// and so our next token should be the first object key.
+
+	var atys map[string]cty.Type
+
+	for {
+		// Read the object key first
+		tok, err := dec.Token()
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if ttok, ok := tok.(json.Delim); ok {
+			if rune(ttok) != '}' {
+				return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok)
+			}
+			break
+		}
+
+		key, ok := tok.(string)
+		if !ok {
+			return cty.NilType, fmt.Errorf("expected string but found %T", tok)
+		}
+
+		// Now read the value
+		tok, err = dec.Token()
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		aty, err := impliedTypeForTok(tok, dec)
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if atys == nil {
+			atys = make(map[string]cty.Type)
+		}
+		atys[key] = aty
+	}
+
+	if len(atys) == 0 {
+		return cty.EmptyObject, nil
+	}
+
+	return cty.Object(atys), nil
+}
+
+func impliedTupleType(dec *json.Decoder) (cty.Type, error) {
+	// By the time we get in here, we've already consumed the [ delimiter
+	// and so our next token should be the first value.
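+	// (So, for example, the JSON array [1,"a"] implies the cty type
+	// cty.Tuple([]cty.Type{cty.Number, cty.String}), and [] implies
+	// cty.EmptyTuple.)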
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != ']' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go new file mode 100644 index 00000000000..155f0b8a1a7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, 
path.NewErrorf("failed to read list value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + 
vals := make(map[string]cty.Value) + + { + objPath := path // some errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder 
{
+	r := bytes.NewReader(buf)
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+	return dec
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/value.go b/vendor/github.com/zclconf/go-cty/cty/json/value.go
new file mode 100644
index 00000000000..f2f7dd56c77
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/value.go
@@ -0,0 +1,65 @@
+package json
+
+import (
+	"bytes"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// Marshal produces a JSON representation of the given value that can later
+// be decoded into a value of the given type.
+//
+// A type is specified separately to allow for the given type to include
+// cty.DynamicPseudoType to represent situations where any type is permitted
+// and so type information must be included to allow recovery of the stored
+// structure when decoding.
+//
+// The given type will also be used to attempt automatic conversions of any
+// non-conformant types in the given value, although this will not always
+// be possible. If the value cannot be made to be conformant then an error is
+// returned, which may be a cty.PathError.
+//
+// Capsule-typed values can be marshalled, but with some caveats. Since
+// capsule values are compared by pointer equality, it is impossible to recover
+// a value that will compare equal to the original value. Additionally,
+// it's not possible to JSON-serialize the capsule type itself, so it's not
+// valid to use capsule types within parts of the value that are conformed to
+// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as
+// the encapsulated type itself is serializable with the Marshal function
+// in encoding/json.
+func Marshal(val cty.Value, t cty.Type) ([]byte, error) {
+	errs := val.Type().TestConformance(t)
+	if errs != nil {
+		// Attempt a conversion
+		var err error
+		val, err = convert.Convert(val, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// From this point onward, val can be assumed to be conforming to t.
+
+	buf := &bytes.Buffer{}
+	var path cty.Path
+	err := marshal(val, t, path, buf)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Unmarshal decodes a JSON representation of the given value into a cty Value
+// conforming to the given type.
+//
+// While decoding, type conversions will be done where possible to make
+// the result conformant even if the types given in JSON are not exactly
+// correct. If conversion isn't possible then an error is returned, which
+// may be a cty.PathError.
+func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) {
+	var path cty.Path
+	return unmarshal(buf, t, path)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go
new file mode 100644
index 00000000000..eadc865e543
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go
@@ -0,0 +1,68 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// typeList instances represent specific list types. Each distinct ElementType
+// creates a distinct, non-equal list type.
+type typeList struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// List creates a list type with the given element Type.
+//
+// List types are CollectionType implementations.
+func List(elem Type) Type {
+	return Type{
+		typeList{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a list whose element type is
+// equal to that of the receiver.
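+// For example, List(String).Equals(List(String)) is true, while both
+// List(String).Equals(List(Number)) and List(String).Equals(String) are
+// false.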
+func (t typeList) Equals(other Type) bool {
+	ot, isList := other.typeImpl.(typeList)
+	if !isList {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeList) FriendlyName() string {
+	return "list of " + t.ElementTypeT.FriendlyName()
+}
+
+func (t typeList) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeList) GoString() string {
+	return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
+}
+
+// IsListType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsListType() bool {
+	_, ok := t.typeImpl.(typeList)
+	return ok
+}
+
+// ListElementType is a convenience method that checks if the given type is
+// a list type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.ListElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) ListElementType() *Type {
+	if lt, ok := t.typeImpl.(typeList); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go
new file mode 100644
index 00000000000..ae9abae040d
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go
@@ -0,0 +1,68 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// typeMap instances represent specific map types. Each distinct ElementType
+// creates a distinct, non-equal map type.
+type typeMap struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Map creates a map type with the given element Type.
+//
+// Map types are CollectionType implementations.
+func Map(elem Type) Type {
+	return Type{
+		typeMap{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a map whose element type is
+// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool {
+	ot, isMap := other.typeImpl.(typeMap)
+	if !isMap {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeMap) FriendlyName() string {
+	return "map of " + t.ElementTypeT.FriendlyName()
+}
+
+func (t typeMap) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeMap) GoString() string {
+	return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
+}
+
+// IsMapType returns true if the given type is a map type, regardless of its
+// element type.
+func (t Type) IsMapType() bool {
+	_, ok := t.typeImpl.(typeMap)
+	return ok
+}
+
+// MapElementType is a convenience method that checks if the given type is
+// a map type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.MapElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) MapElementType() *Type {
+	if lt, ok := t.typeImpl.(typeMap); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/null.go b/vendor/github.com/zclconf/go-cty/cty/null.go
new file mode 100644
index 00000000000..d58d0287b64
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/null.go
@@ -0,0 +1,14 @@
+package cty
+
+// NullVal returns a null value of the given type. A null can be created of any
+// type, but operations on such values will always panic.
+// Calling applications are encouraged to use nulls only sparingly,
+// particularly when user-provided expressions are to be evaluated, since the
+// presence of nulls creates a much higher chance of evaluation errors that
+// can't be caught by a type checker.
+func NullVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  nil,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go
new file mode 100644
index 00000000000..6f5ef305661
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go
@@ -0,0 +1,128 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeObject struct {
+	typeImplSigil
+	AttrTypes map[string]Type
+}
+
+// Object creates an object type with the given attribute types.
+//
+// After a map is passed to this function the caller must no longer access it,
+// since ownership is transferred to this library.
+func Object(attrTypes map[string]Type) Type {
+	return Type{
+		typeObject{
+			AttrTypes: attrTypes,
+		},
+	}
+}
+
+func (t typeObject) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeObject); ok {
+		if len(t.AttrTypes) != len(ot.AttrTypes) {
+			// Fast path: if we don't have the same number of attributes
+			// then we can't possibly be equal. This also avoids the need
+			// to test attributes in both directions below, since we know
+			// there can't be extras in "other".
+			return false
+		}
+
+		for attr, ty := range t.AttrTypes {
+			oty, ok := ot.AttrTypes[attr]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeObject) FriendlyName() string {
+	// There isn't really a friendly way to write an object type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// an object type FriendlyName in its entirety. For example, a caller
+	// could produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "object"
+}
+
+func (t typeObject) GoString() string {
+	if len(t.AttrTypes) == 0 {
+		return "cty.EmptyObject"
+	}
+	return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes)
+}
+
+// EmptyObject is a shorthand for Object(map[string]Type{}), to more
+// easily talk about the empty object type.
+var EmptyObject Type
+
+// EmptyObjectVal is the only possible non-null, non-unknown value of type
+// EmptyObject.
+var EmptyObjectVal Value
+
+func init() {
+	EmptyObject = Object(map[string]Type{})
+	EmptyObjectVal = Value{
+		ty: EmptyObject,
+		v:  map[string]interface{}{},
+	}
+}
+
+// IsObjectType returns true if the given type is an object type, regardless
+// of its attribute types.
+func (t Type) IsObjectType() bool {
+	_, ok := t.typeImpl.(typeObject)
+	return ok
+}
+
+// HasAttribute returns true if the receiver has an attribute with the given
+// name, regardless of its type. Will panic if the receiver isn't an object
+// type; use IsObjectType to determine whether this operation will succeed.
+func (t Type) HasAttribute(name string) bool {
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		_, hasAttr := ot.AttrTypes[name]
+		return hasAttr
+	}
+	panic("HasAttribute on non-object Type")
+}
+
+// AttributeType returns the type of the attribute with the given name. Will
+// panic if the receiver is not an object type (use IsObjectType to confirm)
+// or if the object type has no such attribute (use HasAttribute to confirm).
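+//
+// A usage sketch:
+//
+//     ty := Object(map[string]Type{"name": String})
+//     if ty.HasAttribute("name") {
+//         nameTy := ty.AttributeType("name") // cty.String
+//     }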
+func (t Type) AttributeType(name string) Type {
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		aty, hasAttr := ot.AttrTypes[name]
+		if !hasAttr {
+			panic("no such attribute")
+		}
+		return aty
+	}
+	panic("AttributeType on non-object Type")
+}
+
+// AttributeTypes returns a map from attribute names to their associated
+// types. Will panic if the receiver is not an object type (use IsObjectType
+// to confirm).
+//
+// The returned map is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the returned
+// map. For many purposes the attribute-related methods of Value are more
+// appropriate and more convenient to use.
+func (t Type) AttributeTypes() map[string]Type {
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		return ot.AttrTypes
+	}
+	panic("AttributeTypes on non-object Type")
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go
new file mode 100644
index 00000000000..d38c2231c67
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/path.go
@@ -0,0 +1,178 @@
+package cty
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A Path is a sequence of operations to locate a nested value within a
+// data structure.
+//
+// The empty Path represents the given item. Any PathSteps within represent
+// taking a single step down into a data structure.
+//
+// Path has some convenience methods for gradually constructing a path,
+// but callers can also feel free to just produce a slice of PathStep manually
+// and convert to this type, which may be more appropriate in environments
+// where memory pressure is a concern.
+type Path []PathStep
+
+// PathStep represents a single step down into a data structure, as part
+// of a Path. PathStep is a closed interface, meaning that the only
+// permitted implementations are those within this package.
+type PathStep interface {
+	pathStepSigil() pathStepImpl
+	Apply(Value) (Value, error)
+}
+
+// embed pathStepImpl into a struct to declare it a PathStep implementation
+type pathStepImpl struct{}
+
+func (p pathStepImpl) pathStepSigil() pathStepImpl {
+	return p
+}
+
+// Index returns a new Path that is the receiver with an IndexStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) Index(v Value) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = IndexStep{
+		Key: v,
+	}
+	return ret
+}
+
+// GetAttr returns a new Path that is the receiver with a GetAttrStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) GetAttr(name string) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = GetAttrStep{
+		Name: name,
+	}
+	return ret
+}
+
+// Apply applies each of the steps in turn to successive values starting with
+// the given value, and returns the result. If any step returns an error,
+// the whole operation returns an error.
+func (p Path) Apply(val Value) (Value, error) {
+	var err error
+	for i, step := range p {
+		val, err = step.Apply(val)
+		if err != nil {
+			return NilVal, fmt.Errorf("at step %d: %s", i, err)
+		}
+	}
+	return val, nil
+}
+
+// LastStep applies the given path up to the last step and then returns
+// the resulting value and the final step.
+ +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// be present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. +func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the path that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. +// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. +func (s IndexStep) Apply(val Value) (Value, error) { + switch s.Key.Type() { + case Number: + if !val.Type().IsListType() { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go new file mode 100644 index 00000000000..b8682dd3d17 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package.
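Before moving on to the primitive types, a short sketch (not part of the diff) of the Path API defined above in use, with illustrative values:

    package main

    import "github.com/zclconf/go-cty/cty"

    func main() {
        val := cty.ObjectVal(map[string]cty.Value{
            "names": cty.ListVal([]cty.Value{
                cty.StringVal("a"),
                cty.StringVal("b"),
            }),
        })
        // Equivalent to manually constructing cty.Path{GetAttrStep, IndexStep}.
        path := cty.Path{}.GetAttr("names").Index(cty.NumberIntVal(1))
        result, err := path.Apply(val)
        if err == nil {
            _ = result // cty.StringVal("b")
        }
    }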
+type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName() string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of Unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. +var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the receiver is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/zclconf/go-cty/cty/set/gob.go new file mode 100644 index 00000000000..da2978f655d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted.
+// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. +func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go new file mode 100644 index 00000000000..f15498e2fee --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -0,0 +1,36 @@ +package set + +type Iterator struct { + bucketIds []int + vals map[int][]interface{} + bucketIdx int + valIdx int +} + +func (it *Iterator) Value() interface{} { + return it.currentBucket()[it.valIdx] +} + +func (it *Iterator) Next() bool { + if it.bucketIdx == -1 { + // init + if len(it.bucketIds) == 0 { + return false + } + + it.valIdx = 0 + it.bucketIdx = 0 + return true + } + + it.valIdx++ + if it.valIdx >= len(it.currentBucket()) { + it.valIdx = 0 + it.bucketIdx++ + } + return it.bucketIdx < len(it.bucketIds) +} + +func (it *Iterator) currentBucket() []interface{} { + return it.vals[it.bucketIds[it.bucketIdx]] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go new file mode 100644 index 00000000000..e900eb0ec3d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -0,0 +1,189 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. 
If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Iterator returns an iterator over values in the set, in an undefined order +// that callers should not depend on. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIds := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIds = append(bucketIds, id) + } + sort.Ints(bucketIds) + + return &Iterator{ + bucketIds: bucketIds, + vals: s.vals, + bucketIdx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all of the values in the set in no particular +// order. This is just a wrapper around EachValue that accumulates the results +// in a slice for caller convenience. +// +// The returned slice will be nil if there are no values in the set. +func (s Set) Values() []interface{} { + var ret []interface{} + s.EachValue(func(v interface{}) { + ret = append(ret, v) + }) + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. 
+func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. +func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go new file mode 100644 index 00000000000..7200184b0f2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -0,0 +1,25 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go new file mode 100644 index 00000000000..b4fb316f1cc --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. +// +// Set operations are not optimized to minimize memory pressure. Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. +type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. 
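To illustrate the Rules contract defined above, here is a minimal sketch (not part of the diff) of a Rules implementation for plain ints and its use with NewSet; the intRules name and the modulus are illustrative:

    package main

    import "github.com/zclconf/go-cty/cty/set"

    // intRules is an illustrative Rules implementation. Hash collisions are
    // permitted by the contract and only affect efficiency, so a coarse
    // bucketing function is fine here.
    type intRules struct{}

    func (intRules) Hash(v interface{}) int { return v.(int) % 16 }

    func (intRules) Equivalent(a, b interface{}) bool { return a == b }

    func main() {
        s := set.NewSet(intRules{})
        s.Add(1)
        s.Add(17) // hashes into the same bucket as 1, but is a distinct member
        s.Add(1)  // equivalent to an existing member; no-op
        _ = s.Has(17)  // true
        _ = s.Length() // 2
    }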
+func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go new file mode 100644 index 00000000000..ce738dbe66e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go @@ -0,0 +1,146 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +func (r setRules) Hash(v interface{}) int { + hashBytes := makeSetHashBytes(Value{ + ty: r.Type, + v: v, + }) + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +func makeSetHashBytes(val Value) []byte { + var buf bytes.Buffer + appendSetHashBytes(val, &buf) + return buf.Bytes() +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support heterogeneous lists we don't need to worry about + // collisions between values of different types, apart from + // DynamicPseudoType. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding.
+ if !val.IsKnown() { + buf.WriteRune('?') + return + } + if val.IsNull() { + buf.WriteRune('~') + return + } + + switch val.ty { + case Number: + buf.WriteString(val.v.(*big.Float).String()) + return + case Bool: + if val.v.(bool) { + buf.WriteRune('T') + } else { + buf.WriteRune('F') + } + return + case String: + buf.WriteString(fmt.Sprintf("%q", val.v.(string))) + return + } + + if val.ty.IsMapType() { + buf.WriteRune('{') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(keyVal, buf) + buf.WriteRune(':') + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune('}') + return + } + + if val.ty.IsListType() || val.ty.IsSetType() { + buf.WriteRune('[') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune(']') + return + } + + if val.ty.IsObjectType() { + buf.WriteRune('<') + attrNames := make([]string, 0, len(val.ty.AttributeTypes())) + for attrName := range val.ty.AttributeTypes() { + attrNames = append(attrNames, attrName) + } + sort.Strings(attrNames) + for _, attrName := range attrNames { + appendSetHashBytes(val.GetAttr(attrName), buf) + buf.WriteRune(';') + } + buf.WriteRune('>') + return + } + + if val.ty.IsTupleType() { + buf.WriteRune('<') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune('>') + return + } + + // should never get down here + panic("unsupported type in set hash") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go new file mode 100644 index 00000000000..952a2d2baae --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go @@ -0,0 +1,66 @@ +package cty + +import ( + "fmt" +) + +type typeSet struct { + typeImplSigil + ElementTypeT Type +} + +// Set creates a set type with the given element Type. +// +// Set types are CollectionType implementations. +func Set(elem Type) Type { + return Type{ + typeSet{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a set whose element type is +// equal to that of the receiver. +func (t typeSet) Equals(other Type) bool { + ot, isSet := other.typeImpl.(typeSet) + if !isSet { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeSet) FriendlyName() string { + return "set of " + t.ElementTypeT.FriendlyName() +} + +func (t typeSet) ElementType() Type { + return t.ElementTypeT +} + +func (t typeSet) GoString() string { + return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT) +} + +// IsSetType returns true if the given type is a set type, regardless of its +// element type. +func (t Type) IsSetType() bool { + _, ok := t.typeImpl.(typeSet) + return ok +} + +// SetElementType is a convenience method that checks if the given type is +// a set type, returning a pointer to its element type if so and nil +// otherwise.
This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.SetElementType(); et != nil { +// // Do something with *et +// } +func (t Type) SetElementType() *Type { + if lt, ok := t.typeImpl.(typeSet); ok { + return &lt.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go new file mode 100644 index 00000000000..b98349e3a01 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go @@ -0,0 +1,121 @@ +package cty + +import ( + "fmt" +) + +type typeTuple struct { + typeImplSigil + ElemTypes []Type +} + +// Tuple creates a tuple type with the given element types. +// +// After a slice is passed to this function the caller must no longer access +// the underlying array, since ownership is transferred to this library. +func Tuple(elemTypes []Type) Type { + return Type{ + typeTuple{ + ElemTypes: elemTypes, + }, + } +} + +func (t typeTuple) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeTuple); ok { + if len(t.ElemTypes) != len(ot.ElemTypes) { + // Fast path: if we don't have the same number of elements + // then we can't possibly be equal. + return false + } + + for i, ty := range t.ElemTypes { + oty := ot.ElemTypes[i] + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeTuple) FriendlyName() string { + // There isn't really a friendly way to write a tuple type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // a tuple type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "tuple" +} + +func (t typeTuple) GoString() string { + if len(t.ElemTypes) == 0 { + return "cty.EmptyTuple" + } + return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes) +} + +// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about +// the empty tuple type. +var EmptyTuple Type + +// EmptyTupleVal is the only possible non-null, non-unknown value of type +// EmptyTuple. +var EmptyTupleVal Value + +func init() { + EmptyTuple = Tuple([]Type{}) + EmptyTupleVal = Value{ + ty: EmptyTuple, + v: []interface{}{}, + } +} + +// IsTupleType returns true if the given type is a tuple type, regardless +// of its element types. +func (t Type) IsTupleType() bool { + _, ok := t.typeImpl.(typeTuple) + return ok +} + +// Length returns the number of elements of the receiving tuple type. +// Will panic if the receiver isn't a tuple type; use IsTupleType to determine +// whether this operation will succeed. +func (t Type) Length() int { + if ot, ok := t.typeImpl.(typeTuple); ok { + return len(ot.ElemTypes) + } + panic("Length on non-tuple Type") +} + +// TupleElementType returns the type of the element with the given index. Will +// panic if the receiver is not a tuple type (use IsTupleType to confirm) +// or if the index is out of range (use Length to confirm). +func (t Type) TupleElementType(idx int) Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes[idx] + } + panic("TupleElementType on non-tuple Type") +} + +// TupleElementTypes returns a slice of the receiving tuple type's element +// types. Will panic if the receiver is not a tuple type (use IsTupleType +// to confirm).
+ +// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go new file mode 100644 index 00000000000..ae5f1c83ef0 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type.go @@ -0,0 +1,95 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName() string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName() +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendant elements +// include DynamicPseudoType.
+func (t Type) HasDynamicTypes() bool { + switch { + case t == DynamicPseudoType: + return true + case t.IsPrimitiveType(): + return false + case t.IsCollectionType(): + return false + case t.IsObjectType(): + attrTypes := t.AttributeTypes() + for _, at := range attrTypes { + if at.HasDynamicTypes() { + return true + } + } + return false + case t.IsTupleType(): + elemTypes := t.TupleElementTypes() + for _, et := range elemTypes { + if et.HasDynamicTypes() { + return true + } + } + return false + case t.IsCapsuleType(): + return false + default: + // Should never happen, since above should be exhaustive + panic("HasDynamicTypes does not support the given type") + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go new file mode 100644 index 00000000000..b417dc79b41 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go @@ -0,0 +1,142 @@ +package cty + +// TestConformance recursively walks the receiver and the given other type and +// returns nil if the receiver *conforms* to the given type. +// +// Type conformance is similar to type equality but has one crucial difference: +// DynamicPseudoType can be used within the given type to represent that +// *any* type is allowed. +// +// If any non-conformities are found, the returned slice will be non-nil and +// contain at least one error value. It will be nil if the type is entirely +// conformant. +// +// Note that the special behavior of DynamicPseudoType is the *only* exception +// to normal type equality. Calling applications may wish to apply their own +// automatic conversion logic to the given data structure to create a more +// liberal notion of conformance to a type. +// +// Returned errors are usually (but not always) PathError instances that +// indicate where in the structure the error was found. If a returned error +// is of that type then the error message is written for (English-speaking) +// end-users working within the cty type system, not mentioning any Go-oriented +// implementation details. +func (t Type) TestConformance(other Type) []error { + path := make(Path, 0) + var errs []error + testConformance(t, other, path, &errs) + return errs +} + +func testConformance(given Type, want Type, path Path, errs *[]error) { + if want.Equals(DynamicPseudoType) { + // anything goes! + return + } + + if given.Equals(want) { + // Any equal types are always conformant + return + } + + // The remainder of this function is concerned with detecting + // and reporting the specific non-conformance, since we wouldn't + // have got here if the types were not divergent. + // We treat compound structures as special so that we can report + // specifically what is non-conforming, rather than simply returning + // the entire type names and letting the user puzzle it out. + + if given.IsObjectType() && want.IsObjectType() { + givenAttrs := given.AttributeTypes() + wantAttrs := want.AttributeTypes() + + if len(givenAttrs) != len(wantAttrs) { + // Something is missing from one of them.
+ for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go new file mode 100644 index 00000000000..a6c4d51f88f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go @@ -0,0 +1,50 @@ +package cty + +import ( + "encoding/gob" + "math/big" + + "github.com/zclconf/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. +// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. 
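A sketch (not part of the diff) of the registration pattern the comment above describes, for an encoder with a gob-style Register function; for encoding/gob itself this is redundant, since the package's own init already performs it:

    package main

    import (
        "encoding/gob"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        // Pass the values through verbatim, as the documentation requires.
        // Re-registering identical types with gob is harmless.
        for _, tv := range cty.InternalTypesToRegister {
            gob.Register(tv)
        }
    }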
+ +var InternalTypesToRegister []interface{} + +func init() { + InternalTypesToRegister = []interface{}{ + primitiveType{}, + typeList{}, + typeMap{}, + typeObject{}, + typeSet{}, + setRules{}, + set.Set{}, + typeTuple{}, + big.Float{}, + capsuleType{}, + []interface{}(nil), + map[string]interface{}(nil), + } + + // Register these with gob here, rather than in gob.go, to ensure + // that this will always happen after we build the above. + for _, tv := range InternalTypesToRegister { + gob.Register(tv) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go new file mode 100644 index 00000000000..9f6fce99df6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go @@ -0,0 +1,79 @@ +package cty + +// unknownType is the placeholder type used for the sigil value representing +// "Unknown", to make it unambiguously distinct from any other possible value. +type unknownType struct { +} + +// unknown is a special value that can be used as the internal value of a +// Value to create a placeholder for a value that isn't yet known. +var unknown interface{} = &unknownType{} + +// UnknownVal returns a Value that represents an unknown value of the given +// type. Unknown values can be used to represent a value that is +// not yet known. Its meaning is undefined in cty, but it could be used by +// a calling application to allow partial evaluation. +// +// Unknown values can be created for any type. All operations on +// Unknown values themselves return Unknown. +func UnknownVal(t Type) Value { + return Value{ + ty: t, + v: unknown, + } +} + +func (t unknownType) GoString() string { + // This is the stringification of our internal unknown marker. The + // stringification of the public representation of unknowns is in + // Value.GoString. + return "cty.unknown" +} + +type pseudoTypeDynamic struct { + typeImplSigil +} + +// DynamicPseudoType represents the dynamic pseudo-type. +// +// This type can represent situations where a type is not yet known. Its +// meaning is undefined in cty, but it could be used by a calling +// application to allow expression type checking with some types not yet known. +// For example, the application might optimistically permit any operation on +// values of this type in type checking, allowing a partial type-check result, +// and then repeat the check when more information is known to get the +// final, concrete type. +// +// It is a pseudo-type because it is used only as a sigil to the calling +// application. "Unknown" is the only valid value of this pseudo-type, so +// operations on values of this type will always short-circuit as per +// the rules for that special value. +var DynamicPseudoType Type + +func (t pseudoTypeDynamic) Equals(other Type) bool { + _, ok := other.typeImpl.(pseudoTypeDynamic) + return ok +} + +func (t pseudoTypeDynamic) FriendlyName() string { + return "dynamic" +} + +func (t pseudoTypeDynamic) GoString() string { + return "cty.DynamicPseudoType" +} + +// DynamicVal is the only valid value of the pseudo-type dynamic. +// This value can be used as a placeholder where a value or expression's +// type and value are both unknown, thus allowing partial evaluation. See +// the docs for DynamicPseudoType for more information.
+ +var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go new file mode 100644 index 00000000000..24ff35afd34 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value.go @@ -0,0 +1,71 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. Operation methods stay entirely +// within the type system (methods accept and return Value instances) and +// are intended for use in implementing a language in terms of cty, while +// integration methods either enter or leave the type system, working with +// native Go values. Operation methods are guaranteed to support all of the +// expected short-circuit behavior for unknown and dynamic values, while +// integration methods may not. +// +// The philosophy for the operations API is that it's the caller's +// responsibility to ensure that the given types and values satisfy the +// specified invariants during a separate type check, so that the caller is +// able to return errors to its user from the application's own perspective. +// +// Consequently the design of these methods assumes such checks have already +// been done and panics if any invariants turn out not to be satisfied. These +// panic errors are not intended to be handled, but rather indicate a bug in +// the calling application that should be fixed with more checks prior to +// executing operations. +// +// A related consequence of this philosophy is that no automatic type +// conversions are done. If a method specifies that its argument must be a +// number then it's the caller's responsibility to do that conversion before +// the call, thus allowing the application to have more constrained conversion +// rules than are offered by the built-in converter where necessary. +type Value struct { + ty Type + v interface{} +} + +// Type returns the type of the value. +func (val Value) Type() Type { + return val.ty +} + +// IsKnown returns true if the value is known. That is, if it is not +// the result of the unknown value constructor UnknownVal(...), and is not +// the result of an operation on another unknown value. +// +// Unknown values are only produced either directly or as a result of +// operating on other unknown values, and so an application that never +// introduces Unknown values can be guaranteed to never receive any either. +func (val Value) IsKnown() bool { + return val.v != unknown +} + +// IsNull returns true if the value is null. Values of any type can be +// null, but any operations on a null value will panic. No operation ever +// produces null, so an application that never introduces Null values can +// be guaranteed to never receive any either. +func (val Value) IsNull() bool { + return val.v == nil +} + +// NilVal is an invalid Value that can be used as a placeholder when returning +// with an error from a function that returns (Value, error). +// +// NilVal is *not* a valid Value and so no operations may be performed on it. +// Any attempt to use it will result in a panic. +// +// This should not be confused with the idea of a Null value, as returned by +// NullVal. NilVal is a nil within the *Go* type system, and is invalid in +// the cty type system. Null values *do* exist in the cty type system.
+ +var NilVal = Value{ + ty: Type{typeImpl: nil}, + v: nil, +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go new file mode 100644 index 00000000000..2ccafb1bf90 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go @@ -0,0 +1,251 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "golang.org/x/text/unicode/norm" + + "github.com/zclconf/go-cty/cty/set" +) + +// BoolVal returns a Value of type Bool whose internal value is the given +// bool. +func BoolVal(v bool) Value { + return Value{ + ty: Bool, + v: v, + } +} + +// NumberVal returns a Value of type Number whose internal value is the given +// big.Float. The returned value becomes the owner of the big.Float object, +// and so it's forbidden for the caller to mutate the object after it's +// wrapped in this way. +func NumberVal(v *big.Float) Value { + return Value{ + ty: Number, + v: v, + } +} + +// NumberIntVal returns a Value of type Number whose internal value is equal +// to the given integer. +func NumberIntVal(v int64) Value { + return NumberVal(new(big.Float).SetInt64(v)) +} + +// NumberUIntVal returns a Value of type Number whose internal value is equal +// to the given unsigned integer. +func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid Unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: norm.NFC.String(v), + } +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. +func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogeneous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.)
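For orientation, a brief sketch (not part of the diff) of these constructors in use; note that ListVal panics on an empty slice, which is why ListValEmpty exists:

    package main

    import "github.com/zclconf/go-cty/cty"

    func main() {
        obj := cty.ObjectVal(map[string]cty.Value{
            "name":    cty.StringVal("example"),
            "count":   cty.NumberIntVal(2),
            "enabled": cty.BoolVal(true),
            // An empty list needs an explicit element type.
            "tags": cty.ListValEmpty(cty.String),
        })
        _ = obj
    }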
+func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogeneous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) +func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[key] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogeneous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given slice is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) +func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer.
+func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go new file mode 100644 index 00000000000..766e38ffc1d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -0,0 +1,976 @@ +package cty + +import ( + "fmt" + "math/big" + + "reflect" + + "github.com/zclconf/go-cty/cty/set" +) + +func (val Value) GoString() string { + if val == NilVal { + return "cty.NilVal" + } + + if val.ty == DynamicPseudoType { + return "cty.DynamicVal" + } + + if !val.IsKnown() { + return fmt.Sprintf("cty.Unknown(%#v)", val.ty) + } + if val.IsNull() { + return fmt.Sprintf("cty.Null(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } else { + return "cty.False" + } + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. + if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.NumberVal(new(big.Float).Parse(\"%#v\", 10))", fv) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.v.(set.Set).Values() + if len(vals) == 0 { + return "cty.SetValEmpty()" + } else { + return fmt.Sprintf("cty.SetVal(%#v)", vals) + } + case val.ty.IsCapsuleType(): + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + + // The default case exposes implementation details, so the cases above + // should cover all values in practice for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// The usual short-circuit rules apply, so the result can be unknown or typed +// as dynamic if either of the given values are. Use RawEquals to compare +// if two values are equal *ignoring* the short-circuit rules.
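A short sketch (not part of the diff) of the short-circuit behavior described above:

    package main

    import "github.com/zclconf/go-cty/cty"

    func main() {
        eq := cty.StringVal("a").Equals(cty.StringVal("a")) // cty.True
        unk := cty.StringVal("a").Equals(cty.UnknownVal(cty.String))
        _ = eq.True()     // true
        _ = unk.IsKnown() // false: comparing with an unknown yields unknown
    }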
+func (val Value) Equals(other Value) Value { + if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() { + return UnknownVal(Bool) + } + + if !val.ty.Equals(other.ty) { + return BoolVal(false) + } + + if !(val.IsKnown() && other.IsKnown()) { + return UnknownVal(Bool) + } + + if val.IsNull() || other.IsNull() { + if val.IsNull() && other.IsNull() { + return BoolVal(true) + } + return BoolVal(false) + } + + ty := val.ty + result := false + + switch { + case ty == Number: + result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + case ty == Bool: + result = val.v.(bool) == other.v.(bool) + case ty == String: + // Simple equality is safe because we NFC-normalize strings as they + // enter our world from StringVal, and so we can assume strings are + // always in normal form. + result = val.v.(string) == other.v.(string) + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + result = true + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + result = true + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + result = true + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + equal := true + + // Note that by our definition of sets it's never possible for two + // sets that contain unknown values (directly or indirectly) to + // ever be equal, even if they are otherwise identical. + + // FIXME: iterating both lists and checking each item is not the + // ideal implementation here, but it works with the primitives we + // have in the set implementation. Perhaps the set implementation + // can provide its own equality test later. + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need.
+ return BoolVal(val.v == other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. +func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) 
+ return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. +func (val Value) Add(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. 
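+//
+// A brief caller-side sketch (illustrative, not part of the original
+// documentation):
+//
+//	q := cty.NumberIntVal(7).Divide(cty.NumberIntVal(2))
+//	// q is a cty.Number equal to 3.5; no integer truncation occurs.
+//	inf := cty.NumberIntVal(1).Divide(cty.Zero)
+//	// inf is cty.PositiveInfinity, per the note above.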
+func (val Value) Divide(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := new(big.Float)
+	ret.Quo(val.v.(*big.Float), other.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// Modulo returns the remainder of an integer division of the receiver and
+// the given other value. Both values must be numbers; this method will panic
+// if not.
+//
+// If the "other" value is exactly zero, this operation returns the receiver
+// unchanged, rather than the infinities produced by Divide. Callers who
+// consider a zero divisor to be an error should check for it before calling.
+//
+// This operation is primarily here for use with nonzero natural numbers.
+// Modulo with "other" as a non-natural number gets somewhat philosophical,
+// and this function takes a position on what that should mean, but callers
+// may wish to disallow such things outright or implement their own modulo
+// if they disagree with the interpretation used here.
+func (val Value) Modulo(other Value) Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	// We cheat a bit here with infinities, just abusing the Multiply operation
+	// to get an infinite result of the correct sign.
+	if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity {
+		return val.Multiply(other)
+	}
+
+	if other.RawEquals(Zero) {
+		return val
+	}
+
+	// FIXME: This is a bit clumsy. Should come back later and see if there's a
+	// more straightforward way to do this.
+	rat := val.Divide(other)
+	ratFloorInt := &big.Int{}
+	rat.v.(*big.Float).Int(ratFloorInt)
+	work := (&big.Float{}).SetInt(ratFloorInt)
+	work.Mul(other.v.(*big.Float), work)
+	work.Sub(val.v.(*big.Float), work)
+
+	return NumberVal(work)
+}
+
+// Absolute returns the absolute (signless) value of the receiver, which must
+// be a number or this method will panic.
+func (val Value) Absolute() Value {
+	if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Number)
+		return *shortCircuit
+	}
+
+	ret := (&big.Float{}).Abs(val.v.(*big.Float))
+	return NumberVal(ret)
+}
+
+// GetAttr returns the value of the given attribute of the receiver, which
+// must be of an object type that has an attribute of the given name.
+// This method will panic if the receiver type is not compatible.
+//
+// The method will also panic if the given attribute name is not defined
+// for the value's type. Use the attribute-related methods on Type to
+// check for the validity of an attribute before trying to use it.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
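+//
+// A minimal caller-side sketch (illustrative, not part of the original
+// documentation):
+//
+//	obj := cty.ObjectVal(map[string]cty.Value{
+//		"name": cty.StringVal("example"),
+//	})
+//	name := obj.GetAttr("name") // cty.StringVal("example")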
+func (val Value) GetAttr(name string) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	if !val.ty.IsObjectType() {
+		panic("value is not an object")
+	}
+	if !val.ty.HasAttribute(name) {
+		panic("value has no attribute of that name")
+	}
+
+	attrType := val.ty.AttributeType(name)
+
+	if !val.IsKnown() {
+		return UnknownVal(attrType)
+	}
+
+	return Value{
+		ty: attrType,
+		v: val.v.(map[string]interface{})[name],
+	}
+}
+
+// Index returns the value of an element of the receiver, which must have
+// either a list, map or tuple type. This method will panic if the receiver
+// type is not compatible.
+//
+// The key value must be the correct type for the receiving collection: a
+// number if the collection is a list or tuple, or a string if it is a map.
+// In the case of a list or tuple, the given number must be convertible to int
+// or this method will panic. The key may alternatively be of
+// DynamicPseudoType, in which case the result itself is an unknown of the
+// collection's element type.
+//
+// The result is of the receiver collection's element type, or in the case
+// of a tuple the type of the specific element index requested.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) Index(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	switch {
+	case val.Type().IsListType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != Number {
+			panic("element key for list must be number")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for list must be non-negative integer")
+		}
+
+		return Value{
+			ty: elty,
+			v: val.v.([]interface{})[index],
+		}
+	case val.Type().IsMapType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != String {
+			panic("element key for map must be string")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		keyStr := key.v.(string)
+
+		return Value{
+			ty: elty,
+			v: val.v.(map[string]interface{})[keyStr],
+		}
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return DynamicVal
+		}
+
+		if key.Type() != Number {
+			panic("element key for tuple must be number")
+		}
+		if !key.IsKnown() {
+			return DynamicVal
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for tuple must be non-negative integer")
+		}
+
+		eltys := val.Type().TupleElementTypes()
+
+		if !val.IsKnown() {
+			return UnknownVal(eltys[index])
+		}
+
+		return Value{
+			ty: eltys[index],
+			v: val.v.([]interface{})[index],
+		}
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasIndex returns True if the receiver (which must be supported for Index)
+// has an element with the given index key, or False if it does not.
+//
+// The result will be UnknownVal(Bool) if either the collection or the
+// key value are unknown.
+//
+// This method will panic if the receiver is not indexable, but does not
+// impose any panic-causing type constraints on the key.
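+//
+// For example (illustrative, not part of the original documentation):
+//
+//	l := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+//	l.HasIndex(cty.NumberIntVal(1)) // cty.True
+//	l.HasIndex(cty.NumberIntVal(2)) // cty.False: out of range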
+func (val Value) HasIndex(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return UnknownVal(Bool)
+	}
+
+	switch {
+	case val.Type().IsListType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != Number {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+		if !val.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			return False
+		}
+
+		return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0)
+	case val.Type().IsMapType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != String {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+		if !val.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		keyStr := key.v.(string)
+		_, exists := val.v.(map[string]interface{})[keyStr]
+
+		return BoolVal(exists)
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(Bool)
+		}
+
+		if key.Type() != Number {
+			return False
+		}
+		if !key.IsKnown() {
+			return UnknownVal(Bool)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			return False
+		}
+
+		length := val.Type().Length()
+		return BoolVal(int(index) < length && index >= 0)
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// Length returns the length of the receiver, which must be a collection type
+// or tuple type, as a number value. If the receiver is not a compatible type
+// then this method will panic.
+//
+// If the receiver is unknown then the result is also unknown.
+//
+// If the receiver is null then this function will panic.
+//
+// Note that Length is not supported for strings. To determine the length
+// of a string, call AsString and take the length of the native Go string
+// that is returned.
+func (val Value) Length() Value {
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return NumberIntVal(int64(val.Type().Length()))
+	}
+
+	if !val.IsKnown() {
+		return UnknownVal(Number)
+	}
+
+	return NumberIntVal(int64(val.LengthInt()))
+}
+
+// LengthInt is like Length except it returns an int. It has the same behavior
+// as Length except that it will panic if the receiver is unknown.
+//
+// This is an integration method provided for the convenience of code bridging
+// into Go's type system.
+func (val Value) LengthInt() int {
+	if val.Type().IsTupleType() {
+		// For tuples, we can return the length even if the value is not known.
+		return val.Type().Length()
+	}
+	if !val.IsKnown() {
+		panic("value is not known")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+
+	switch {
+
+	case val.ty.IsListType():
+		return len(val.v.([]interface{}))
+
+	case val.ty.IsSetType():
+		return val.v.(set.Set).Length()
+
+	case val.ty.IsMapType():
+		return len(val.v.(map[string]interface{}))
+
+	default:
+		panic("value is not a collection")
+	}
+}
+
+// ElementIterator returns an ElementIterator for iterating the elements
+// of the receiver, which must be a collection type, a tuple type, or an object
+// type. If called on a value of any other type, this method will panic.
+//
+// The value must be Known and non-Null, or this method will panic.
+//
+// If the receiver is of a list type, the returned keys will be of type Number
+// and the values will be of the list's element type.
+//
+// If the receiver is of a map type, the returned keys will be of type String
+// and the value will be of the map's element type. Elements are passed in
+// ascending lexicographical order by key.
+//
+// If the receiver is of a set type, each element is returned as both the
+// key and the value, since set members are their own identity.
+//
+// If the receiver is of a tuple type, the returned keys will be of type Number
+// and the value will be of the corresponding element's type.
+//
+// If the receiver is of an object type, the returned keys will be of type
+// String and the value will be of the corresponding attribute's type.
+//
+// ElementIterator is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ElementIterator() ElementIterator {
+	if !val.IsKnown() {
+		panic("can't use ElementIterator on unknown value")
+	}
+	if val.IsNull() {
+		panic("can't use ElementIterator on null value")
+	}
+	return elementIterator(val)
+}
+
+// CanIterateElements returns true if the receiver can support the
+// ElementIterator method (and by extension, ForEachElement) without panic.
+func (val Value) CanIterateElements() bool {
+	return canElementIterator(val)
+}
+
+// ForEachElement executes a given callback function for each element of
+// the receiver, which must be a collection type or tuple type, or this method
+// will panic.
+//
+// ForEachElement uses ElementIterator internally, and so the values passed
+// to the callback are as described for ElementIterator.
+//
+// Returns true if the iteration exited early due to the callback function
+// returning true, or false if the loop ran to completion.
+//
+// ForEachElement is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ForEachElement(cb ElementCallback) bool {
+	it := val.ElementIterator()
+	for it.Next() {
+		key, val := it.Element()
+		stop := cb(key, val)
+		if stop {
+			return true
+		}
+	}
+	return false
+}
+
+// Not returns the logical inverse of the receiver, which must be of type
+// Bool or this method will panic.
+func (val Value) Not() Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(!val.v.(bool))
+}
+
+// And returns the result of logical AND with the receiver and the other given
+// value, which must both be of type Bool or this method will panic.
+func (val Value) And(other Value) Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(val.v.(bool) && other.v.(bool))
+}
+
+// Or returns the result of logical OR with the receiver and the other given
+// value, which must both be of type Bool or this method will panic.
+func (val Value) Or(other Value) Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(val.v.(bool) || other.v.(bool))
+}
+
+// LessThan returns True if the receiver is less than the other given value,
+// which must both be numbers or this method will panic.
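+//
+// A short caller-side sketch (illustrative, not part of the original
+// documentation), showing the unknown-value short-circuit:
+//
+//	cty.NumberIntVal(1).LessThan(cty.NumberIntVal(2))         // cty.True
+//	cty.NumberIntVal(1).LessThan(cty.UnknownVal(cty.Number))  // cty.UnknownVal(cty.Bool)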
+func (val Value) LessThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. +func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "convert" package. +func (val Value) AsBigFloat() *big.Float { + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. Since cty considers values to be immutable, it is strongly +// recommended to treat the encapsulated value itself as immutable too. +func (val Value) EncapsulatedValue() interface{} { + if !val.Type().IsCapsuleType() { + panic("not a capsule-typed value") + } + + return val.v +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 00000000000..a3c021d3f88 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. 
The chain of function calls between them must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+//	func DoSomething(ctx context.Context, arg Arg) error {
+//		// ... use ctx ...
+//	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 00000000000..606cf1f9726
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+	resp, err := client.Do(req.WithContext(ctx))
+	// If we got an error, and the context has been canceled,
+	// the context's error is probably more useful.
+	if err != nil {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+		default:
+		}
+	}
+	return resp, err
+}
+
+// Get issues a GET request via the Do function.
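+//
+// A minimal usage sketch (illustrative; the timeout and URL are arbitrary,
+// and a nil client selects http.DefaultClient):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}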
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 00000000000..926870cc23f --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. + cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. 
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 00000000000..d20f52b7de9 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. 
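+//
+// For example (an illustrative sketch; the deadline is arbitrary):
+//
+//	d := time.Now().Add(50 * time.Millisecond)
+//	ctx, cancel := context.WithDeadline(context.Background(), d)
+//	defer cancel()
+//	<-ctx.Done() // closed once the deadline passes (or cancel is called)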
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 00000000000..d88bd1db127 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 00000000000..0f35592df51 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. 
+var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. 
+func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. 
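+//
+// An illustrative caller-side sketch (not part of the original
+// documentation; the key type and value are hypothetical):
+//
+//	type ctxKey int
+//	const requestIDKey ctxKey = 0
+//	ctx := context.WithValue(context.Background(), requestIDKey, "abc123")
+//	id, ok := ctx.Value(requestIDKey).(string) // "abc123", true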
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+	Context
+	key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+	if c.key == key {
+		return c.val
+	}
+	return c.Context.Value(key)
+}
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 00000000000..b105f80be4f
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//	// Stream generates values with DoSomething and sends them to out
+	//	// until DoSomething returns an error or ctx.Done is closed.
+	//	func Stream(ctx context.Context, out chan<- Value) error {
+	//		for {
+	//			v, err := DoSomething(ctx)
+	//			if err != nil {
+	//				return err
+	//			}
+	//			select {
+	//			case <-ctx.Done():
+	//				return ctx.Err()
+	//			case out <- v:
+	//			}
+	//		}
+	//	}
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	//	// Package user defines a User type that's stored in Contexts.
+	//	package user
+	//
+	//	import "golang.org/x/net/context"
+	//
+	//	// User is the type of value stored in the Contexts.
+	//	type User struct {...}
+	//
+	//	// key is an unexported type for keys defined in this package.
+	//	// This prevents collisions with keys defined in other packages.
+	//	type key int
+	//
+	//	// userKey is the key for user.User values in Contexts. It is
+	//	// unexported; clients use user.NewContext and user.FromContext
+	//	// instead of using this key directly.
+	//	var userKey key = 0
+	//
+	//	// NewContext returns a new Context that carries value u.
+	//	func NewContext(ctx context.Context, u *User) context.Context {
+	//		return context.WithValue(ctx, userKey, u)
+	//	}
+	//
+	//	// FromContext returns the User value stored in ctx, if any.
+	//	func FromContext(ctx context.Context) (*User, bool) {
+	//		u, ok := ctx.Value(userKey).(*User)
+	//		return u, ok
+	//	}
+	Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 00000000000..53fc5257974
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+    autotools-dev libtool pkg-config zlib1g-dev \
+    libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+    automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+    autoconf automake autotools-dev \
+    libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+    libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+    cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 00000000000..55fd826f77b
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+	docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 00000000000..360d5aa3790
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go
new file mode 100644
index 00000000000..698860b7775
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/ciphers.go
@@ -0,0 +1,641 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+// A list of the possible cipher suite ids. Taken from
+// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+	cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+	cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+	cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+	cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+	cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+	cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+	cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+	cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+	cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+	cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+	cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+	cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+	cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+	cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+	cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+	cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+	cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+	cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+	cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+	cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+	cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+	cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+	cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+	cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+	cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+	cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+	cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+	cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+	// Reserved uint16 = 0x001C-1D
+	cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+	cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+	cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+	cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+	cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+	cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+	cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+	cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+	cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 
0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 
uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + 
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + 
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + 
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. +// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + 
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + 
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 00000000000..bdf5652b01c --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,256 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? 
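// Aside (illustrative sketch, not part of the vendored file): the dialing
// map below hand-rolls the per-key coalescing that the singleflight package
// mentioned in the TODO provides. The pattern in miniature, assuming a
// hypothetical expensive dial(key) helper and package-level mu/inflight:
//
//	mu.Lock()
//	if call, ok := inflight[key]; ok { // a dial for key is already running
//		mu.Unlock()
//		<-call.done // wait for its result instead of dialing again
//		return call.res, call.err
//	}
//	call := &dialCall{done: make(chan struct{})}
//	inflight[key] = call
//	mu.Unlock()
//	call.res, call.err = dial(key) // only one goroutine per key gets here
//	close(call.done)               // wake all waiters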
+type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The returned used result reports whether c was used. +// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. +type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go new file mode 100644 index 00000000000..b65fc6d423d --- /dev/null +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -0,0 +1,80 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
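// Aside (illustrative, not part of the vendored file): filterOutClientConn
// above uses the zero-allocation in-place filter idiom, reusing the backing
// array via in[:0] and nil-ing the vacated tail slot so the GC can reclaim
// the excluded *ClientConn. The same idiom in general form, with keep() as
// a placeholder predicate:
//
//	out := in[:0]
//	for _, v := range in {
//		if keep(v) {
//			out = append(out, v)
//		}
//	}
//	for i := len(out); i < len(in); i++ {
//		in[i] = nil // drop trailing references so they can be collected
//	}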
+ +// +build go1.6 + +package http2 + +import ( + "crypto/tls" + "fmt" + "net/http" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// converts panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already a cached connection to the host. +type noDialH2RoundTripper struct{ t *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.t.RoundTrip(req) + if err == ErrNoCachedConn { + return nil, http.ErrSkipAltProtocol + } + return res, err +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 00000000000..a3067f8de74 --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary.
The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. + if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). 
+ want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 00000000000..71f2c463178 --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,133 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. +type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." 
+type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. +type connError struct { + Code ErrCode // the ConnectionError error code + Reason string // additional reason +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 00000000000..957de25420d --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both per-conn and per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. +func (f *flow) add(n int32) bool { + remain := (1<<31 - 1) - f.n + if n > remain { + return false + } + f.n += n + return true +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 00000000000..3b14890728f --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1579 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
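// Aside (illustrative, not part of the vendored file): the flow type above
// couples each per-stream window to the shared conn-level window, so
// available() is the minimum of the two and take() debits both. A small
// usage sketch with made-up window sizes:
//
//	connWin := &flow{n: 100}
//	streamWin := &flow{n: 65535}
//	streamWin.setConnFlow(connWin)
//	n := streamWin.available() // 100: the conn window is the tighter limit
//	streamWin.take(n)          // stream window is now 65435, conn window 0
//	ok := connWin.add(50)      // re-credit; returns false only on overflow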
+ +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. 
+// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. +func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1<<i) == 0 { + continue + } + set++ + if set > 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<<i)] + if name != "" { + buf.WriteString(name) + } else { + fmt.Fprintf(buf, "0x%x", 1<<i) + } + } + } + if h.StreamID != 0 { + fmt.Fprintf(buf, " stream=%d", h.StreamID) + } + fmt.Fprintf(buf, " len=%d", h.Length) +} + +func (h *FrameHeader) checkValid() { + if !h.valid { + panic("Frame accessed after ReadFrame returned next Frame") + } +} + +func (h *FrameHeader) invalidate() { h.valid = false } + +// frame header bytes. +// Used only by ReadFrameHeader. +var fhBytes = sync.Pool{ + New: func() interface{} { + buf := make([]byte, frameHeaderLen) + return &buf + }, +} + +// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. +// Most users should use Framer.ReadFrame instead. +func ReadFrameHeader(r io.Reader) (FrameHeader, error) { + bufp := fhBytes.Get().(*[]byte) + defer fhBytes.Put(bufp) + return readFrameHeader(*bufp, r) +} + +func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) { + _, err := io.ReadFull(r, buf[:frameHeaderLen]) + if err != nil { + return FrameHeader{}, err + } + return FrameHeader{ + Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])), + Type: FrameType(buf[3]), + Flags: Flags(buf[4]), + StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1), + valid: true, + }, nil +} + +// A Frame is the base interface implemented by all frame types. +// Callers will generally type-assert the specific frame type: +// *HeadersFrame, *PingFrame, *DataFrame, etc. +// +// Frames are only valid until the next call to Framer.ReadFrame. +type Frame interface { + Header() FrameHeader + + // invalidate is called by Framer.ReadFrame to make this + // frame's buffers as being invalid, since the subsequent + // frame will reuse them. + invalidate() +} + +// A Framer reads and writes Frames. +type Framer struct { + r io.Reader + lastFrame Frame + errDetail error + + // lastHeaderStream is non-zero if the last frame was an + // unfinished HEADERS/CONTINUATION. + lastHeaderStream uint32 + + maxReadSize uint32 + headerBuf [frameHeaderLen]byte + + // TODO: let getReadBuf be configurable, and use a less memory-pinning + // allocator in server.go to minimize memory pinned for many idle conns. + getReadBuf func(size uint32) []byte + readBuf []byte // cache for default getReadBuf + + maxWriteSize uint32 // zero means unlimited; TODO: implement + + w io.Writer + wbuf []byte + + // AllowIllegalWrites permits the Framer's Write methods to + // write frames that do not conform to the HTTP/2 spec. This + // permits using the Framer to test other HTTP/2 + // implementations' conformance to the spec. + // If false, the Write methods will prefer to return an error + // rather than comply. + AllowIllegalWrites bool + + // AllowIllegalReads permits the Framer's ReadFrame method + // to return non-compliant frames or frame orders. + // This is for testing and permits using the Framer to test + // other HTTP/2 implementations' conformance to the spec. + // It is not compatible with ReadMetaHeaders. + AllowIllegalReads bool + + // ReadMetaHeaders if non-nil causes ReadFrame to merge + // HEADERS and CONTINUATION frames together and return + // MetaHeadersFrame instead. + ReadMetaHeaders *hpack.Decoder + + // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE. + // It's used only if ReadMetaHeaders is set; 0 means a sane default + // (currently 16MB). If the limit is hit, MetaHeadersFrame.Truncated is set true. + MaxHeaderListSize uint32 + + logReads, logWrites bool + + debugFramer *Framer // only use for logging written writes + debugFramerBuf *bytes.Buffer + debugReadLoggerf func(string, ...interface{}) + debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) +} + +func (fr *Framer) maxHeaderListSize() uint32 { + if fr.MaxHeaderListSize == 0 { + return 16 << 20 // sane default, per docs + } + return fr.MaxHeaderListSize +} + +func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { + // Write the FrameHeader. + f.wbuf = append(f.wbuf[:0], + 0, // 3 bytes of length, filled in in endWrite + 0, + 0, + byte(ftype), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if f.logWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) + return + } + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason so the caller can optionally relay it +// to the peer before hanging up on them.
This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. 
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+	return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
+	if !validStreamID(streamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	if len(pad) > 0 {
+		if len(pad) > 255 {
+			return errPadLength
+		}
+		if !f.AllowIllegalWrites {
+			for _, b := range pad {
+				if b != 0 {
+					// "Padding octets MUST be set to zero when sending."
+					return errPadBytes
+				}
+			}
+		}
+	}
+	var flags Flags
+	if endStream {
+		flags |= FlagDataEndStream
+	}
+	if pad != nil {
+		flags |= FlagDataPadded
+	}
+	f.startWrite(FrameData, flags, streamID)
+	if pad != nil {
+		f.wbuf = append(f.wbuf, byte(len(pad)))
+	}
+	f.wbuf = append(f.wbuf, data...)
+	f.wbuf = append(f.wbuf, pad...)
+	return f.endWrite()
+}
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+	FrameHeader
+	p []byte
+}
+
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+		// When this (ACK 0x1) bit is set, the payload of the
+		// SETTINGS frame MUST be empty. Receipt of a
+		// SETTINGS frame with the ACK flag set and a length
+		// field value other than 0 MUST be treated as a
+		// connection error (Section 5.4.1) of type
+		// FRAME_SIZE_ERROR.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	if fh.StreamID != 0 {
+		// SETTINGS frames always apply to a connection,
+		// never a single stream. The stream identifier for a
+		// SETTINGS frame MUST be zero (0x0). If an endpoint
+		// receives a SETTINGS frame whose stream identifier
+		// field is anything other than 0x0, the endpoint MUST
+		// respond with a connection error (Section 5.4.1) of
+		// type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	if len(p)%6 != 0 {
+		// Expecting even number of 6 byte settings.
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	f := &SettingsFrame{FrameHeader: fh, p: p}
+	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+		// Values above the maximum flow control window size of 2^31 - 1 MUST
+		// be treated as a connection error (Section 5.4.1) of type
+		// FLOW_CONTROL_ERROR.
+		return nil, ConnectionError(ErrCodeFlowControl)
+	}
+	return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+	return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+	f.checkValid()
+	buf := f.p
+	for len(buf) > 0 {
+		settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+		if settingID == s {
+			return binary.BigEndian.Uint32(buf[2:6]), true
+		}
+		buf = buf[6:]
+	}
+	return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
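+//
+// [Editorial note: the sketch below is illustrative and not part of the
+// vendored file; it assumes `frame` came from Framer.ReadFrame.]
+//
+//	if sf, ok := frame.(*http2.SettingsFrame); ok && !sf.IsAck() {
+//		sf.ForeachSetting(func(s http2.Setting) error {
+//			log.Printf("peer setting %v = %v", s.ID, s.Val)
+//			return nil // returning non-nil stops the iteration early
+//		})
+//	}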
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + buf := f.p + for len(buf) > 0 { + if err := fn(Setting{ + SettingID(binary.BigEndian.Uint16(buf[:2])), + binary.BigEndian.Uint32(buf[2:6]), + }); err != nil { + return err + } + buf = buf[6:] + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). 
It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. + Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. 
+		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+	}
+	var padLength uint8
+	if fh.Flags.Has(FlagHeadersPadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+	if fh.Flags.Has(FlagHeadersPriority) {
+		var v uint32
+		p, v, err = readUint32(p)
+		if err != nil {
+			return nil, err
+		}
+		hf.Priority.StreamDep = v & 0x7fffffff
+		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+		p, hf.Priority.Weight, err = readByte(p)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if len(p)-int(padLength) <= 0 {
+		return nil, streamError(fh.StreamID, ErrCodeProtocol)
+	}
+	hf.headerFragBuf = p[:len(p)-int(padLength)]
+	return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndStream indicates that the header block is the last that
+	// the endpoint will send for the identified stream. Setting
+	// this flag causes the stream to enter one of "half closed"
+	// states.
+	EndStream bool
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+
+	// Priority, if non-zero, includes stream priority information
+	// in the HEADERS frame.
+	Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagHeadersPadded
+	}
+	if p.EndStream {
+		flags |= FlagHeadersEndStream
+	}
+	if p.EndHeaders {
+		flags |= FlagHeadersEndHeaders
+	}
+	if !p.Priority.IsZero() {
+		flags |= FlagHeadersPriority
+	}
+	f.startWrite(FrameHeaders, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !p.Priority.IsZero() {
+		v := p.Priority.StreamDep
+		if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+			return errDepStreamID
+		}
+		if p.Priority.Exclusive {
+			v |= 1 << 31
+		}
+		f.writeUint32(v)
+		f.writeByte(p.Priority.Weight)
+	}
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+	FrameHeader
+	PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+	// StreamDep is a 31-bit stream identifier for the
+	// stream that this stream depends on. Zero means no
+	// dependency.
+	StreamDep uint32
+
+	// Exclusive is whether the dependency is exclusive.
+	Exclusive bool
+
+	// Weight is the stream's zero-indexed weight. It should be
+	// set together with StreamDep, or neither should be set. Per
+	// the spec, "Add one to the value to obtain a weight between
+	// 1 and 256."
+ Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) 
+	return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+	FrameHeader
+	PromiseID     uint32
+	headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+	pp := &PushPromiseFrame{
+		FrameHeader: fh,
+	}
+	if pp.StreamID == 0 {
+		// PUSH_PROMISE frames MUST be associated with an existing,
+		// peer-initiated stream. The stream identifier of a
+		// PUSH_PROMISE frame indicates the stream it is associated
+		// with. If the stream identifier field specifies the value
+		// 0x0, a recipient MUST respond with a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	// The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames.
+	var padLength uint8
+	if fh.Flags.Has(FlagPushPromisePadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+
+	p, pp.PromiseID, err = readUint32(p)
+	if err != nil {
+		return
+	}
+	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+	if int(padLength) > len(p) {
+		// like the DATA frame, error out if padding is longer than the body.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	pp.headerFragBuf = p[:len(p)-int(padLength)]
+	return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+
+	// PromiseID is the required Stream ID that this
+	// Push Promise is for.
+	PromiseID uint32
+
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with Header Frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagPushPromisePadded
+	}
+	if p.EndHeaders {
+		flags |= FlagPushPromiseEndHeaders
+	}
+	f.startWrite(FramePushPromise, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.writeUint32(p.PromiseID)
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
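+//
+// [Editorial note: illustrative sketch, not part of the vendored file; the
+// frame type 0xfa and two-byte payload are invented for the example.]
+//
+//	// Send a hypothetical extension frame on the connection (stream 0).
+//	if err := fr.WriteRawFrame(http2.FrameType(0xfa), 0, 0, []byte{0x01, 0x02}); err != nil {
+//		log.Fatal(err)
+//	}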
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. + // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. 
+		for _, hf2 := range pf[:i] {
+			if hf.Name == hf2.Name {
+				return duplicatePseudoHeaderError(hf.Name)
+			}
+		}
+	}
+	if isRequest && isResponse {
+		return errMixPseudoHeaderTypes
+	}
+	return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+	v := fr.maxHeaderListSize()
+	if uint32(int(v)) == v {
+		return int(v)
+	}
+	// They had a crazy big number for MaxHeaderBytes anyway,
+	// so give them unlimited header lengths:
+	return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+	if fr.AllowIllegalReads {
+		return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+	}
+	mh := &MetaHeadersFrame{
+		HeadersFrame: hf,
+	}
+	var remainSize = fr.maxHeaderListSize()
+	var sawRegular bool
+
+	var invalid error // pseudo header field errors
+	hdec := fr.ReadMetaHeaders
+	hdec.SetEmitEnabled(true)
+	hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+	hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+		if VerboseLogs && fr.logReads {
+			fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
+		}
+		if !httplex.ValidHeaderFieldValue(hf.Value) {
+			invalid = headerFieldValueError(hf.Value)
+		}
+		isPseudo := strings.HasPrefix(hf.Name, ":")
+		if isPseudo {
+			if sawRegular {
+				invalid = errPseudoAfterRegular
+			}
+		} else {
+			sawRegular = true
+			if !validWireHeaderFieldName(hf.Name) {
+				invalid = headerFieldNameError(hf.Name)
+			}
+		}
+
+		if invalid != nil {
+			hdec.SetEmitEnabled(false)
+			return
+		}
+
+		size := hf.Size()
+		if size > remainSize {
+			hdec.SetEmitEnabled(false)
+			mh.Truncated = true
+			return
+		}
+		remainSize -= size
+
+		mh.Fields = append(mh.Fields, hf)
+	})
+	// Lose reference to MetaHeadersFrame:
+	defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+	var hc headersOrContinuation = hf
+	for {
+		frag := hc.HeaderBlockFragment()
+		if _, err := hdec.Write(frag); err != nil {
+			return nil, ConnectionError(ErrCodeCompression)
+		}
+
+		if hc.HeadersEnded() {
+			break
+		}
+		if f, err := fr.ReadFrame(); err != nil {
+			return nil, err
+		} else {
+			hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+		}
+	}
+
+	mh.HeadersFrame.headerFragBuf = nil
+	mh.HeadersFrame.invalidate()
+
+	if err := hdec.Close(); err != nil {
+		return nil, ConnectionError(ErrCodeCompression)
+	}
+	if invalid != nil {
+		fr.errDetail = invalid
+		if VerboseLogs {
+			log.Printf("http2: invalid header: %v", invalid)
+		}
+		return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
+	}
+	if err := mh.checkPseudos(); err != nil {
+		fr.errDetail = err
+		if VerboseLogs {
+			log.Printf("http2: invalid pseudo headers: %v", err)
+		}
+		return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
+	}
+	return mh, nil
+}
+
+func summarizeFrame(f Frame) string {
+	var buf bytes.Buffer
+	f.Header().writeDebug(&buf)
+	switch f := f.(type) {
+	case *SettingsFrame:
+		n := 0
+		f.ForeachSetting(func(s Setting) error {
+			n++
+			if n == 1 {
+				buf.WriteString(", settings:")
+			}
+			fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+			return nil
+		})
+		if n > 0 {
+			buf.Truncate(buf.Len() - 1) // remove trailing comma
+		}
+	case *DataFrame:
+		data := f.Data()
+		const max = 256
+		if len(data) > max {
+			data = data[:max]
+		}
+		fmt.Fprintf(&buf, " data=%q", data)
+		if len(f.Data()) > max {
+			fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+		}
+	case *WindowUpdateFrame:
+		if f.StreamID == 0 {
+			buf.WriteString(" (conn)")
+ } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go new file mode 100644 index 00000000000..00b2e9e3cf3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go16.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return t1.ExpectContinueTimeout +} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go new file mode 100644 index 00000000000..47b7fae081a --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17.go @@ -0,0 +1,106 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package http2 + +import ( + "context" + "net" + "net/http" + "net/http/httptrace" + "time" +) + +type contextContext interface { + context.Context +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return context.WithCancel(ctx) +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req.WithContext(ctx) +} + +type clientTrace httptrace.ClientTrace + +func reqContext(r *http.Request) context.Context { return r.Context() } + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *clientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *clientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *clientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *clientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *clientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + +func requestTrace(req 
*http.Request) *clientTrace { + trace := httptrace.ContextClientTrace(req.Context()) + return (*clientTrace)(trace) +} + +// Ping sends a PING frame to the server and waits for the ack. +func (cc *ClientConn) Ping(ctx context.Context) error { + return cc.ping(ctx) +} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go new file mode 100644 index 00000000000..b4c52ecec3b --- /dev/null +++ b/vendor/golang.org/x/net/http2/go17_not18.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7,!go1.8 + +package http2 + +import "crypto/tls" + +// temporary copy of Go 1.7's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go new file mode 100644 index 00000000000..4f30d228a88 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go18.go @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "crypto/tls" + "io" + "net/http" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { + c2 := c.Clone() + c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 + return c2 +} + +var _ http.Pusher = (*responseWriter)(nil) + +// Push implements http.Pusher. +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + internalOpts := pushOptions{} + if opts != nil { + internalOpts.Method = opts.Method + internalOpts.Header = opts.Header + } + return w.push(target, internalOpts) +} + +func configureServer18(h1 *http.Server, h2 *Server) error { + if h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil && panicValue != http.ErrAbortHandler +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return req.GetBody +} + +func reqBodyIsNoBody(body io.ReadCloser) bool { + return body == http.NoBody +} + +func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only diff --git a/vendor/golang.org/x/net/http2/go19.go b/vendor/golang.org/x/net/http2/go19.go new file mode 100644 index 00000000000..38124ba56e2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go19.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build go1.9
+
+package http2
+
+import (
+	"net/http"
+)
+
+func configureServer19(s *http.Server, conf *Server) error {
+	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
+	return nil
+}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 00000000000..9933c9f8c74
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+	if !DebugGoroutines {
+		return 0
+	}
+	return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() != uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+func (g goroutineLock) checkNotOn() {
+	if !DebugGoroutines {
+		return
+	}
+	if curGoroutineID() == uint64(g) {
+		panic("running on the wrong goroutine")
+	}
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+	bp := littleBuf.Get().(*[]byte)
+	defer littleBuf.Put(bp)
+	b := *bp
+	b = b[:runtime.Stack(b, false)]
+	// Parse the 4707 out of "goroutine 4707 ["
+	b = bytes.TrimPrefix(b, goroutineSpace)
+	i := bytes.IndexByte(b, ' ')
+	if i < 0 {
+		panic(fmt.Sprintf("No space found in %q", b))
+	}
+	b = b[:i]
+	n, err := parseUintBytes(b, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+	}
+	return n
+}
+
+var littleBuf = sync.Pool{
+	New: func() interface{} {
+		buf := make([]byte, 64)
+		return &buf
+	},
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+	var cutoff, maxVal uint64
+
+	if bitSize == 0 {
+		bitSize = int(strconv.IntSize)
+	}
+
+	s0 := s
+	switch {
+	case len(s) < 1:
+		err = strconv.ErrSyntax
+		goto Error
+
+	case 2 <= base && base <= 36:
+		// valid base; nothing to do
+
+	case base == 0:
+		// Look for octal, hex prefix.
+		switch {
+		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+			base = 16
+			s = s[2:]
+			if len(s) < 1 {
+				err = strconv.ErrSyntax
+				goto Error
+			}
+		case s[0] == '0':
+			base = 8
+		default:
+			base = 10
+		}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
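+//
+// [Editorial note: worked example, not part of the vendored file. For
+// base 10, cutoff64 returns (2^64-1)/10 + 1 = 1844674407370955162, the
+// smallest n whose product n*10 overflows uint64; parseUintBytes compares
+// against it to detect overflow before multiplying.]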
+func cutoff64(base int) uint64 { + if base < 2 { + return 0 + } + return (1<<64-1)/uint64(base) + 1 +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 00000000000..c2805f6ac4d --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 00000000000..54726c2a3c5 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. 
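+//
+// [Editorial note: illustrative usage sketch, not part of the vendored
+// file; `token` is a placeholder value.]
+//
+//	var buf bytes.Buffer
+//	enc := hpack.NewEncoder(&buf)
+//	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+//	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
+//	// Sensitive fields use the "Never Indexed" representation and are
+//	// kept out of the dynamic table.
+//	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: token, Sensitive: true})
+//	// buf.Bytes() now holds the HPACK-encoded header block.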
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches f in both static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	i, nameValueMatch = staticTable.search(f)
+	if nameValueMatch {
+		return i, true
+	}
+
+	j, nameValueMatch := e.dynTab.table.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		return j + uint64(staticTable.len()), nameValueMatch
+	}
+
+	return i, false
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same as the default dynamic header table size
+// described in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
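+//
+// [Editorial note: worked example, not part of the vendored file.] These
+// helpers use the prefixed-integer encoding of RFC 7541 Section 5.1 (see
+// appendVarInt below). Encoding 1337 with a 5-bit prefix (RFC 7541,
+// Appendix C.1.2):
+//
+//	1337 >= 31, so the prefix is filled with 31 (0x1f), leaving 1306
+//	1306 % 128 = 26, continuation bit set -> 0x9a
+//	1306 / 128 = 10, no continuation bit  -> 0x0a
+//	encoded bytes: 0x1f 0x9a 0x0a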
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using an
+// n-bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces a strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+	}
+	return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+	if sensitive {
+		return 0x10
+	}
+	if indexing {
+		return 0x40
+	}
+	return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 00000000000..176644acdac
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,490 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
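+//
+// [Editorial note: illustrative sketch, not part of the vendored file.
+// DecodingError values surface from Decoder.Write and Decoder.Close;
+// `block` stands for a received HPACK-encoded header block.]
+//
+//	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
+//		fmt.Printf("%s: %s\n", f.Name, f.Value)
+//	})
+//	if _, err := dec.Write(block); err != nil {
+//		if de, ok := err.(hpack.DecodingError); ok {
+//			log.Printf("peer sent a malformed header block: %v", de)
+//		}
+//	}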
+type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. 
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+	d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+	d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+	d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+	d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+	table          headerFieldTable
+	size           uint32 // in bytes
+	maxSize        uint32 // current maxSize
+	allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+	dt.maxSize = v
+	dt.evict()
+}
+
+func (dt *dynamicTable) add(f HeaderField) {
+	dt.table.addEntry(f)
+	dt.size += f.Size()
+	dt.evict()
+}
+
+// If we're too big, evict old stuff.
+func (dt *dynamicTable) evict() {
+	var n int
+	for dt.size > dt.maxSize && n < dt.table.len() {
+		dt.size -= dt.table.ents[n].Size()
+		n++
+	}
+	dt.table.evictOldest(n)
+}
+
+func (d *Decoder) maxTableIndex() int {
+	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
+	// the dynamic table to 2^32 bytes, where each entry will occupy more than
+	// one byte. Further, the staticTable has a fixed, small length.
+	return d.dynTab.table.len() + staticTable.len()
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+	// See Section 2.3.3.
+	if i == 0 {
+		return
+	}
+	if i <= uint64(staticTable.len()) {
+		return staticTable.ents[i-1], true
+	}
+	if i > uint64(d.maxTableIndex()) {
+		return
+	}
+	// In the dynamic table, newer entries have lower indices.
+	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
+	// the reversed dynamic table.
+	dt := d.dynTab.table
+	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
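+//
+// [Editorial note: illustrative sketch, not part of the vendored file;
+// `dec` is a *hpack.Decoder and `block` a complete header block.]
+//
+//	fields, err := dec.DecodeFull(block)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, f := range fields {
+//		fmt.Println(f.Name, "=", f.Value)
+//	}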
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. 
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
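+//
+// For orientation (illustrative): an hpack string is a 1-bit Huffman
+// flag plus a 7-bit-prefix varint length, then the payload, so a raw
+// 5-byte string is encoded as
+//
+//	0x05 'h' 'e' 'l' 'l' 'o'   // high bit clear: not Huffman-coded
+//
+// while RFC 7541 appendix C.1.2 encodes the integer 1337 with a 5-bit
+// prefix as 0b00011111 0b10011010 0b00001010.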
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 00000000000..8850e394677 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
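+	// (Illustration: after the first input byte b is shifted in below,
+	// cur == uint(b) and cbits == 8; each table lookup then consumes
+	// the top 8 bits of the window, byte(cur >> (cbits-8)), walking
+	// one level of the decoding tree per byte-sized chunk.)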
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // length of the code in bits
+	sym     byte  // the symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 00000000000..a66cfbea69d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+	// For static tables, entries are never evicted.
+	//
+	// For dynamic tables, entries are evicted from ents[0] and added to the end.
+	// Each entry has a unique id that starts at one and increments for each
+	// entry that is added. This unique id is stable across evictions, meaning
+	// it can be used as a pointer to a specific entry. As in hpack, unique ids
+	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
+	//
+	// Zero is not a valid unique id.
+	//
+	// evictCount should not overflow in any remotely practical situation. In
+	// practice, we will have one dynamic table per HTTP/2 connection.
If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
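+//
+// (Worked example, assuming a dynamic table: with evictCount == 10 and
+// three live entries, the valid ids are 11, 12 and 13, which map to
+// HPACK indices 3, 2 and 1; the newest entry always has index 1.)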
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 00000000000..d565f40e0c1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
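+//
+// A minimal server opt-in sketch (illustrative; srv, mux and the
+// certificate paths are assumptions, not part of this package):
+//
+//	srv := &http.Server{Addr: ":443", Handler: mux}
+//	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))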
+// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
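+// The limits enforced below come from RFC 7540 section 6.5.2:
+// ENABLE_PUSH must be 0 or 1, INITIAL_WINDOW_SIZE may not exceed
+// 2^31-1, and MAX_FRAME_SIZE must lie within [2^14, 2^24-1]; other
+// settings pass through unchecked.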
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httplex.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
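+// A *bufio.Writer is taken from bufWriterPool on the first Write and
+// returned to the pool by Flush, so idle connections hold no buffer
+// (see the methods below).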
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
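+//
+// Examples (illustrative): "/", "/index.html" and "*" are accepted;
+// "" and "index.html" are rejected.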
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 00000000000..508cebcc4db --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,21 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 00000000000..140434a791a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 00000000000..6f8d3f86fa8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 00000000000..5ae07726b74 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 00000000000..a6140099cb3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. 
+// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 00000000000..d790c3b3d59 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2863 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. 
+ +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. 
+ state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256. + const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + if cs == requiredCipher { + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. 
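+	// (Illustrative: a CipherSuites list that satisfies the check above
+	// lists HTTP/2-approved suites first, e.g.
+	//
+	//	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+	//	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+	//
+	// with any legacy suites only after those.)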
+ + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. 
+ // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. 
+ sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. 
Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. +type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. 
+func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. + str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. 
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +// readPreface reads the ClientPreface greeting from the peer +// or returns an error on timeout or an invalid greeting. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errors.New("timeout waiting for client preface") + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. 
(We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
+ case stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ wr.replyToWriter(err)
+ return
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(frameWriteResult{wr, err})
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr)
+ }
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in
+// the main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+ sc.writingFrameAsync = false
+
+ wr := res.wr
+
+ if writeEndsStream(wr.write) {
+ st := wr.stream
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = stateHalfClosedLocal
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(streamError(st.id, ErrCodeNo))
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ } else {
+ switch v := wr.write.(type) {
+ case StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case handlerPanicRST:
+ sc.closeStream(wr.stream, errHandlerPanicked)
+ }
+ }
+
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
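+//
+// (Illustrative summary, not part of the vendored source: the loop below
+// prefers, in order, a pending GOAWAY, then a SETTINGS ack, then whatever
+// the write scheduler pops (skipped once a non-graceful GOAWAY is in
+// flight), and finally a flush when the last frame written wasn't one.)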
+func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. 
CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. 
When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. 
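+ // (Illustrative numbers, not from the vendored source: a DATA frame
+ // with f.Length == 1000 that carries 900 payload bytes has 100 bytes
+ // of padding overhead, so pad == 100 below and that credit is returned
+ // immediately at both the connection and stream level.)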
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. 
An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
+ return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
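+//
+// (Illustrative, not part of the vendored source: a handler like
+//
+//	http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		panic("boom")
+//	})
+//
+// is recovered by the deferred func below and answered with a RST_STREAM
+// for that single stream; the connection and its other streams carry on.)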
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. 
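+//
+// (Illustrative, not part of the vendored source: a handler doing
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.Write([]byte("hello")) // no explicit WriteHeader call
+//	}
+//
+// reaches writeChunk with wroteHeader unset, so the code below first
+// synthesizes writeHeader(200) and emits the HEADERS frame, then the DATA.)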
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. 
When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Flush called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ cw := rws.stream.cw
+ go func() {
+ cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if !rws.wroteHeader {
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte)
+// * -> responseWriterState.writeChunk (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// At most one of dataB and dataS is non-empty.
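+// (Illustrative: w.Write([]byte("hi")) arrives here with dataB set and
+// dataS empty, while w.WriteString("hi") arrives with dataB nil; carrying
+// both forms avoids a string/[]byte conversion copy on either path.)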
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. 
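+		// (Editor's example: an opts.Header entry such as
+		// "Content-Length: 0" or "Host: example.com" is rejected by
+		// the switch below, since a pushed request carries no body
+		// and its promised URL is already absolute.)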
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := &startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.serveMsgCh <- msg:
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
+
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+func (sc *serverConn) startPush(msg *startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.Push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+		// A server that is unable to establish a new stream identifier can send a GOAWAY
+		// frame so that the client is forced to open a new connection for new streams.
+		if sc.maxPushPromiseID+2 >= 1<<31 {
+			sc.startGracefulShutdownInternal()
+			return 0, ErrPushLimitReached
+		}
+		sc.maxPushPromiseID += 2
+		promisedID := sc.maxPushPromiseID
+
+		// http://tools.ietf.org/html/rfc7540#section-8.2.
+		// Strictly speaking, the new stream should start in "reserved (local)", then
+		// transition to "half closed (remote)" after sending the initial HEADERS, but
+		// we start in "half closed (remote)" for simplicity.
+		// See further comments at the definition of stateHalfClosedRemote.
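+		// (Editor's worked example: promised stream IDs advance 2, 4,
+		// 6, ...; once maxPushPromiseID reaches 1<<31-2, the next even
+		// ID would overflow the 31-bit stream-ID space, so the check
+		// above opts for graceful shutdown instead.)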
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. 
+func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
+	var x interface{} = hs
+	type I interface {
+		doKeepAlives() bool
+	}
+	if hs, ok := x.(I); ok {
+		return !hs.doKeepAlives()
+	}
+	return false
+}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 00000000000..5760b359545
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,2293 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/rand"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/idna"
+	"golang.org/x/net/lex/httplex"
+)
+
+const (
+	// transportDefaultConnFlow is how many connection-level flow control
+	// tokens we give the server at start-up, past the default 64k.
+	transportDefaultConnFlow = 1 << 30
+
+	// transportDefaultStreamFlow is how many stream-level flow
+	// control tokens we announce to the peer, and how many bytes
+	// we buffer per stream.
+	transportDefaultStreamFlow = 4 << 20
+
+	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+	// a stream-level WINDOW_UPDATE for at a time.
+	transportDefaultStreamMinRefresh = 4 << 10
+
+	defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+	// DialTLS specifies an optional dial function for creating
+	// TLS connections for requests.
+	//
+	// If DialTLS is nil, tls.Dial is used.
+	//
+	// If the returned net.Conn has a ConnectionState method like tls.Conn,
+	// it will be used to set http.Response.TLS.
+	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+	// TLSClientConfig specifies the TLS configuration to use with
+	// tls.Client. If nil, the default configuration is used.
+	TLSClientConfig *tls.Config
+
+	// ConnPool optionally specifies an alternate connection pool to use.
+	// If nil, the default is used.
+	ConnPool ClientConnPool
+
+	// DisableCompression, if true, prevents the Transport from
+	// requesting compression with an "Accept-Encoding: gzip"
+	// request header when the Request contains no existing
+	// Accept-Encoding value. If the Transport requests gzip on
+	// its own and gets a gzipped response, it's transparently
+	// decoded in the Response.Body. However, if the user
+	// explicitly requested gzip it is not automatically
+	// uncompressed.
+	DisableCompression bool
+
+	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+	// plain-text "http" scheme. Note that this does not enable h2c support.
+	AllowHTTP bool
+
+	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+	// send in the initial settings frame. It is how many bytes
+	// of response headers are allowed. Unlike the http2 spec, zero here
+	// means to use a default limit (currently 10MB). If you actually
+	// want to advertise an unlimited value to the peer, Transport
+	// interprets the highest possible value here (0xffffffff or 1<<32-1)
+	// to mean no limit.
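+	//
+	// (Editor's sketch, not upstream code: a typical standalone setup as
+	// seen from a client of this package; the values are illustrative:
+	//
+	//	t := &http2.Transport{
+	//		TLSClientConfig:   &tls.Config{ServerName: "example.com"},
+	//		MaxHeaderListSize: 1 << 20, // accept up to 1MB of response headers
+	//	}
+	//	client := &http.Client{Transport: t}
+	// )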
+ MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
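+// (Editor's note: for example, two concurrent client.Get calls multiplexed
+// over one ClientConn create two clientStreams with IDs 1 and 3; client
+// stream IDs are odd, since newStream starts nextStreamID at 1 and advances
+// it by 2.)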
+type clientStream struct {
+	cc            *ClientConn
+	req           *http.Request
+	trace         *clientTrace // or nil
+	ID            uint32
+	resc          chan resAndError
+	bufPipe       pipe // buffered pipe with the flow-controlled response payload
+	startedWrite  bool // started request body write; guarded by cc.mu
+	requestedGzip bool
+	on100         func() // optional code to run if we get a 100 continue response
+
+	flow        flow  // guarded by cc.mu
+	inflow      flow  // guarded by cc.mu
+	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+	readErr     error // sticky read error; owned by transportResponseBody.Read
+	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu
+
+	peerReset chan struct{} // closed on peer reset
+	resetErr  error         // populated before peerReset is closed
+
+	done chan struct{} // closed when stream is removed from cc.streams map; close calls guarded by cc.mu
+
+	// owned by clientConnReadLoop:
+	firstByte    bool // got the first response byte
+	pastHeaders  bool // got first MetaHeadersFrame (actual headers)
+	pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+	trailer    http.Header  // accumulated trailers
+	resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel waits for the user to cancel a request or for the done
+// channel to be signaled. A non-nil error is returned only if the request was
+// canceled.
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
+	ctx := reqContext(req)
+	if req.Cancel == nil && ctx.Done() == nil {
+		return nil
+	}
+	select {
+	case <-req.Cancel:
+		return errRequestCanceled
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-done:
+		return nil
+	}
+}
+
+// awaitRequestCancel waits for the user to cancel a request, its context to
+// expire, or for the request to be done (any way it might be removed from the
+// cc.streams map: peer reset, successful completion, TCP connection breakage,
+// etc). If the request is canceled, then cs will be canceled and closed.
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+	if err := awaitRequestCancel(req, cs.done); err != nil {
+		cs.cancelStream()
+		cs.bufPipe.CloseWithError(err)
+	}
+}
+
+func (cs *clientStream) cancelStream() {
+	cc := cs.cc
+	cc.mu.Lock()
+	didReset := cs.didReset
+	cs.didReset = true
+	cc.mu.Unlock()
+
+	if !didReset {
+		cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+		cc.forgetStreamID(cs.ID)
+	}
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+	select {
+	case <-cs.peerReset:
+		return cs.resetErr
+	case <-cs.done:
+		return errStreamClosed
+	default:
+		return nil
+	}
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+	if err == nil {
+		panic("nil error")
+	}
+	cc := cs.cc
+	cc.mu.Lock()
+	cs.stopReqBody = err
+	cc.cond.Broadcast()
+	cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+	w   io.Writer
+	err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+	if *sew.err != nil {
+		return 0, *sew.err
+	}
+	n, err = sew.w.Write(p)
+	*sew.err = err
+	return
+}
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+	// OnlyCachedConn controls whether RoundTripOpt may
+	// create a new TCP connection. If set true and
+	// no cached connection is available, RoundTripOpt
+	// will return ErrNoCachedConn.
+	OnlyCachedConn bool
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port / ip:port)
+// to a host:port. The port 443 is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+	host, port, err := net.SplitHostPort(authority)
+	if err != nil { // authority didn't have a port
+		port = "443"
+		if scheme == "http" {
+			port = "80"
+		}
+		host = authority
+	}
+	if a, err := idna.ToASCII(host); err == nil {
+		host = a
+	}
+	// IPv6 address literal, without a port:
+	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+		return host + ":" + port
+	}
+	return net.JoinHostPort(host, port)
+}
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+		return nil, errors.New("http2: unsupported scheme")
+	}
+
+	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+	for retry := 0; ; retry++ {
+		cc, err := t.connPool().GetClientConn(req, addr)
+		if err != nil {
+			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+			return nil, err
+		}
+		traceGotConn(req, cc)
+		res, err := cc.RoundTrip(req)
+		if err != nil && retry <= 6 {
+			afterBodyWrite := false
+			if e, ok := err.(afterReqBodyWriteError); ok {
+				err = e.err // unwrap so canRetryError sees the underlying error
+				afterBodyWrite = true
+			}
+			if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil {
+				// After the first retry, do exponential backoff with 10% jitter.
+				if retry == 0 {
+					continue
+				}
+				backoff := float64(uint(1) << (uint(retry) - 1))
+				backoff += backoff * (0.1 * mathrand.Float64())
+				select {
+				case <-time.After(time.Second * time.Duration(backoff)):
+					continue
+				case <-reqContext(req).Done():
+					return nil, reqContext(req).Err()
+				}
+			}
+		}
+		if err != nil {
+			t.vlogf("RoundTrip failure: %v", err)
+			return nil, err
+		}
+		return res, nil
+	}
+}
+
+// CloseIdleConnections closes any connections which were made by previous
+// requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+	if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+		cp.closeIdleConnections()
+	}
+}
+
+var (
+	errClientConnClosed    = errors.New("http2: client conn is closed")
+	errClientConnUnusable  = errors.New("http2: client conn not usable")
+	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+)
+
+// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.
+// It is used to signal that err happened after part of Request.Body was sent to the server.
+type afterReqBodyWriteError struct {
+	err error
+}
+
+func (e afterReqBodyWriteError) Error() string {
+	return e.err.Error() + "; some request body already written"
+}
+
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
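+//
+// (Editor's sketch, not upstream code: a request body is only replayable
+// when GetBody is defined, which http.NewRequest does for common in-memory
+// body types; the URL is illustrative:
+//
+//	req, _ := http.NewRequest("POST", "https://example.com", bytes.NewReader(data))
+//	// req.GetBody != nil, so a failure after a partial body write can be
+//	// retried with a cloned body instead of returning an error.
+// )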
+func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + if !afterBodyWrite { + return req, nil + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. + getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && + int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? 
Just Close:
+	cc.mu.Unlock()
+
+	if VerboseLogs {
+		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+	}
+	cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size and 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+	cc.mu.Lock()
+	size := cc.maxFrameSize
+	if size > maxAllocFrameSize {
+		size = maxAllocFrameSize
+	}
+	for i, buf := range cc.freeBuf {
+		if len(buf) >= int(size) {
+			cc.freeBuf[i] = nil
+			cc.mu.Unlock()
+			return buf[:size]
+		}
+	}
+	cc.mu.Unlock()
+	return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+	if len(cc.freeBuf) < maxBufs {
+		cc.freeBuf = append(cc.freeBuf, buf)
+		return
+	}
+	for i, old := range cc.freeBuf {
+		if old == nil {
+			cc.freeBuf[i] = buf
+			return
+		}
+	}
+	// forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+	keys := make([]string, 0, len(req.Trailer))
+	for k := range req.Trailer {
+		k = http.CanonicalHeaderKey(k)
+		switch k {
+		case "Transfer-Encoding", "Trailer", "Content-Length":
+			return "", &badStringError{"invalid Trailer key", k}
+		}
+		keys = append(keys, k)
+	}
+	if len(keys) > 0 {
+		sort.Strings(keys)
+		return strings.Join(keys, ","), nil
+	}
+	return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+	if cc.t.t1 != nil {
+		return cc.t.t1.ResponseHeaderTimeout
+	}
+	// No way to do this (yet?) with just an http2.Transport. Probably
+	// no need. Request.Cancel is the new way. We only need to support
+	// this for compatibility with the old http.Transport fields when
+	// we're doing transparent http2.
+	return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers,
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+	if v := req.Header.Get("Upgrade"); v != "" {
+		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+	}
+	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+	}
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
+		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+	}
+	return nil
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
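+//
+// (Editor's examples: a nil req.Body or http.NoBody yields 0; a POST built
+// with bytes.NewReader(data) yields len(data) via req.ContentLength; a plain
+// io.Reader body with req.ContentLength left at 0 yields -1, i.e. unknown.)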
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + if err := checkConnHeaders(req); err != nil { + return nil, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. 
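+			// (Editor's example: if a large upload is still in
+			// flight and the server answers early with a 403, the
+			// two calls below abort the body write; a 2xx reply
+			// instead lets the upload continue, per the comment
+			// above.)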
+ bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.mu.Lock() + afterBodyWrite := cs.startedWrite + cc.mu.Unlock() + cc.forgetStreamID(cs.ID) + if afterBodyWrite { + return nil, afterReqBodyWriteError{re.err} + } + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
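+		// (Editor's note: this is the usual workaround for sync.Cond not
+		// being able to select on a channel: a helper goroutine, shaped
+		// like the one below, turns the cancellation event into a
+		// cond.Broadcast under the same mutex.)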
+		if waitingForConn == nil {
+			waitingForConn = make(chan struct{})
+			go func() {
+				if err := awaitRequestCancel(req, waitingForConn); err != nil {
+					cc.mu.Lock()
+					waitingForConnErr = err
+					cc.cond.Broadcast()
+					cc.mu.Unlock()
+				}
+			}()
+		}
+		cc.pendingRequests++
+		cc.cond.Wait()
+		cc.pendingRequests--
+		if waitingForConnErr != nil {
+			return waitingForConnErr
+		}
+	}
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+	first := true // first frame written (HEADERS is first, then CONTINUATION)
+	frameSize := int(cc.maxFrameSize)
+	for len(hdrs) > 0 && cc.werr == nil {
+		chunk := hdrs
+		if len(chunk) > frameSize {
+			chunk = chunk[:frameSize]
+		}
+		hdrs = hdrs[len(chunk):]
+		endHeaders := len(hdrs) == 0
+		if first {
+			cc.fr.WriteHeaders(HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: chunk,
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+			first = false
+		} else {
+			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+		}
+	}
+	// TODO(bradfitz): this Flush could potentially block (as
+	// could the WriteHeaders call(s) above), which means they
+	// wouldn't respond to Request.Cancel being readable. That's
+	// rare, but this should probably be in a goroutine.
+	cc.bw.Flush()
+	return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+	// abort request body write; don't send cancel
+	errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+	// abort request body write, but send a stream reset (CANCEL).
+	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+	cc := cs.cc
+	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+	buf := cc.frameScratchBuffer()
+	defer cc.putFrameScratchBuffer(buf)
+
+	defer func() {
+		traceWroteRequest(cs.trace, err)
+		// TODO: write h12Compare test showing whether
+		// Request.Body is closed by the Transport,
+		// and in multiple cases: server replies <=299 and >299
+		// while still writing request body
+		cerr := bodyCloser.Close()
+		if err == nil {
+			err = cerr
+		}
+	}()
+
+	req := cs.req
+	hasTrailers := req.Trailer != nil
+
+	var sawEOF bool
+	for !sawEOF {
+		n, err := body.Read(buf)
+		if err == io.EOF {
+			sawEOF = true
+			err = nil
+		} else if err != nil {
+			return err
+		}
+
+		remain := buf[:n]
+		for len(remain) > 0 && err == nil {
+			var allowed int32
+			allowed, err = cs.awaitFlowControl(len(remain))
+			switch {
+			case err == errStopReqBodyWrite:
+				return err
+			case err == errStopReqBodyWriteAndCancel:
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+				return err
+			case err != nil:
+				return err
+			}
+			cc.wmu.Lock()
+			data := remain[:allowed]
+			remain = remain[allowed:]
+			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+			err = cc.fr.WriteData(cs.ID, sentEnd, data)
+			if err == nil {
+				// TODO(bradfitz): this flush is for latency, not bandwidth.
+				// Most requests won't need this. Make this opt-in or
+				// opt-out? Use some heuristic on the body type? Nagle-like
+				// timers? Based on 'n'? Only last chunk of this for loop,
+				// unless flow control tokens are low? For now, always.
+				// If we change this, see comment below.
+				err = cc.bw.Flush()
+			}
+			cc.wmu.Unlock()
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	if sentEnd {
+		// Already sent END_STREAM (which implies we have no
+		// trailers) and flushed, because currently all
+		// WriteData frames above get a flush. So we're done.
+ return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. 
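+				// (Editor's example: a request with Host
+				// "example.com" is emitted only as the
+				// ":authority" pseudo-header above, and any
+				// literal Host or Content-Length fields are
+				// skipped here.)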
+ continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + cc.writeHeader(strings.ToLower(name), value) + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. 
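+// (Editor's note: callers follow the pattern
+//
+//	cc.mu.Lock()
+//	cs := cc.newStream()
+//	cc.mu.Unlock()
+//
+// as RoundTrip does above, since nextStreamID and cc.streams are guarded
+// by cc.mu.)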
+func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + activeRes map[uint32]*clientStream // keyed by streamID + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{ + cc: cc, + activeRes: make(map[uint32]*clientStream), + } + + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range rl.activeRes { + cs.bufPipe.CloseWithError(err) + } + for _, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + defer cc.streamByID(f.StreamID, true) + } + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. 
+ traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + if res.Body != noBody { + rl.activeRes[cs.ID] = cs + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. 
+ return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. + v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. 
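+ // Since the stream itself is being abandoned, only the
+ // connection-level window is refunded for buffered-but-unread
+ // bytes; a stream-level WINDOW_UPDATE would serve no purpose.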
+ if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. 
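+ // A nil error denotes a clean end of stream: the body pipe is
+ // closed with io.EOF and any received trailers are copied into the
+ // response's Trailer map via cs.copyTrailers.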
+ rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + cs.bufPipe.closeWithErrorAndCode(err, code) + delete(rl.activeRes, cs.ID) + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + delete(rl.activeRes, cs.ID) + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. 
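+// Each ping carries a random 8-byte payload so that concurrent pings
+// can be matched to their acks via the cc.pings map.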
+// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) ping(ctx contextContext) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) 
+} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. 
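+// For example (illustrative): setting req.Close = true, or sending a
+// "Connection: close" request header, both cause the read loop to
+// close the connection once it goes idle (see closeWhenIdle).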
+func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 00000000000..54ab4a88e7b --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) 
+} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. 
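+ // endStream is set when this HEADERS frame should carry the
+ // END_STREAM flag, i.e. when no response body follows.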
+ endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
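+ // Note: pushed (server-initiated) streams use even-numbered IDs
+ // (RFC 7540, Section 5.1.1), and IDs on a connection must increase
+ // monotonically, which is why allocation is deferred until the
+ // frame is actually written.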
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 00000000000..4fe30730735 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
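+ // Implementations typically allocate per-stream queue state here.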
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
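+//
+// Illustrative example (assumed values): given a queued 10-byte DATA
+// frame and n=4 bytes of available stream flow control, Consume
+// returns a 4-byte frame, a 6-byte remainder, and 2.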
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
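+// Recycling queues through this free list keeps busy connections from
+// allocating a fresh queue for every stream.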
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 00000000000..848fed6ec76 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
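+//
+// Illustrative server-side usage (a sketch, assuming the exported
+// Server.NewWriteScheduler hook in this package):
+//
+//   s := &http2.Server{
+//       NewWriteScheduler: func() http2.WriteScheduler {
+//           return http2.NewPriorityWriteScheduler(nil)
+//       },
+//   }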
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. 
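+ // The scan below detects the uniform-weight fast path so that the
+ // sibling sort can be skipped in the common case.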
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
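+ // options.PusherID is 0 for client-initiated streams, so they are
+ // parented under the root; pushed streams are parented under the
+ // stream that pushed them.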
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
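+ // A DATA frame here would mean body bytes were pushed for a
+ // stream the scheduler never opened, which violates the
+ // WriteScheduler contract, hence the panic below.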
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 00000000000..36d7919f16a --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. 
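+ // Go randomizes map iteration order, which is what makes this
+ // scheduler's choice among ready streams effectively arbitrary.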
+ for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 00000000000..7d328fcb0f6 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,717 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. 
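+//
+// For example (illustrative): with RemoveLeadingDots(true), an input
+// such as ".example.com" is processed as "example.com".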
+func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. 
+// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. 
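+
+	// A minimal sketch of typical use of the exported profiles above
+	// (editorial illustration; not part of the generated file):
+	//
+	//	a, err := idna.Lookup.ToASCII("bücher.example.com")
+	//	// a == "xn--bcher-kva.example.com" when err == nil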
+)
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+	return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+	return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see http://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+	var err error
+	var isBidi bool
+	if p.mapping != nil {
+		s, isBidi, err = p.mapping(p, s)
+	}
+	// Remove leading empty labels.
+	if p.removeLeadingDots {
+		for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+		}
+	}
+	// TODO: allow for a quick check of the tables data.
+	// It seems like we should only create this error on ToASCII, but the
+	// UTS 46 conformance tests suggest we should always check this.
+	if err == nil && p.verifyDNSLength && s == "" {
+		err = &labelError{s, "A4"}
+	}
+	labels := labelIter{orig: s}
+	for ; !labels.done(); labels.next() {
+		label := labels.label()
+		if label == "" {
+			// Empty labels are not okay. The label iterator skips the last
+			// label if it is empty.
+			if err == nil && p.verifyDNSLength {
+				err = &labelError{s, "A4"}
+			}
+			continue
+		}
+		if strings.HasPrefix(label, acePrefix) {
+			u, err2 := decode(label[len(acePrefix):])
+			if err2 != nil {
+				if err == nil {
+					err = err2
+				}
+				// Spec says keep the old label.
+				continue
+			}
+			isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
+			labels.set(u)
+			if err == nil && p.validateLabels {
+				err = p.fromPuny(p, u)
+			}
+			if err == nil {
+				// This should be called on NonTransitional, according to the
+				// spec, but that currently does not have any effect. Use the
+				// original profile to preserve options.
+				err = p.validateLabel(u)
+			}
+		} else if err == nil {
+			err = p.validateLabel(label)
+		}
+	}
+	if isBidi && p.bidirule != nil && err == nil {
+		for labels.reset(); !labels.done(); labels.next() {
+			if !p.bidirule(labels.label()) {
+				err = &labelError{s, "B"}
+				break
+			}
+		}
+	}
+	if toASCII {
+		for labels.reset(); !labels.done(); labels.next() {
+			label := labels.label()
+			if !ascii(label) {
+				a, err2 := encode(acePrefix, label)
+				if err == nil {
+					err = err2
+				}
+				label = a
+				labels.set(a)
+			}
+			n := len(label)
+			if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+				err = &labelError{label, "A4"}
+			}
+		}
+	}
+	s = labels.result()
+	if toASCII && p.verifyDNSLength && err == nil {
+		// Compute the length of the domain name minus the root label and its dot.
+		n := len(s)
+		if n > 0 && s[n-1] == '.' {
+			n--
+		}
+		if len(s) < 1 || n > 253 {
+			err = &labelError{s, "A4"}
+		}
+	}
+	return s, err
+}
+
+func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
+	// TODO: consider first doing a quick check to see if any of these checks
+	// need to be done. This will make it slower in the general case, but
+	// faster in the common case.
+	mapped = norm.NFC.String(s)
+	isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
+	return mapped, isBidi, nil
+}
+
+func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
+	// TODO: filter need for normalization in loop below.
+	if !norm.NFC.IsNormalString(s) {
+		return s, false, &labelError{s, "V1"}
+	}
+	for i := 0; i < len(s); {
+		v, sz := trie.lookupString(s[i:])
+		bidi = bidi || info(v).isBidi(s[i:])
+		// Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. 
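+// A-labels begin with this prefix; for example, "xn--bcher-kva" is the
+// A-label encoding of "bücher" (cf. the ToASCII example above).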
+const acePrefix = "xn--"
+
+func (p *Profile) simplify(cat category) category {
+	switch cat {
+	case disallowedSTD3Mapped:
+		if p.useSTD3Rules {
+			cat = disallowed
+		} else {
+			cat = mapped
+		}
+	case disallowedSTD3Valid:
+		if p.useSTD3Rules {
+			cat = disallowed
+		} else {
+			cat = valid
+		}
+	case deviation:
+		if !p.transitional {
+			cat = valid
+		}
+	case validNV8, validXV8:
+		// TODO: handle V2008
+		cat = valid
+	}
+	return cat
+}
+
+func validateFromPunycode(p *Profile, s string) error {
+	if !norm.NFC.IsNormalString(s) {
+		return &labelError{s, "V1"}
+	}
+	// TODO: detect whether string may have to be normalized in the following
+	// loop.
+	for i := 0; i < len(s); {
+		v, sz := trie.lookupString(s[i:])
+		if c := p.simplify(info(v).category()); c != valid && c != deviation {
+			return &labelError{s, "V6"}
+		}
+		i += sz
+	}
+	return nil
+}
+
+const (
+	zwnj = "\u200c"
+	zwj  = "\u200d"
+)
+
+type joinState int8
+
+const (
+	stateStart joinState = iota
+	stateVirama
+	stateBefore
+	stateBeforeVirama
+	stateAfter
+	stateFAIL
+)
+
+var joinStates = [][numJoinTypes]joinState{
+	stateStart: {
+		joiningL:   stateBefore,
+		joiningD:   stateBefore,
+		joinZWNJ:   stateFAIL,
+		joinZWJ:    stateFAIL,
+		joinVirama: stateVirama,
+	},
+	stateVirama: {
+		joiningL: stateBefore,
+		joiningD: stateBefore,
+	},
+	stateBefore: {
+		joiningL:   stateBefore,
+		joiningD:   stateBefore,
+		joiningT:   stateBefore,
+		joinZWNJ:   stateAfter,
+		joinZWJ:    stateFAIL,
+		joinVirama: stateBeforeVirama,
+	},
+	stateBeforeVirama: {
+		joiningL: stateBefore,
+		joiningD: stateBefore,
+		joiningT: stateBefore,
+	},
+	stateAfter: {
+		joiningL:   stateFAIL,
+		joiningD:   stateBefore,
+		joiningT:   stateAfter,
+		joiningR:   stateStart,
+		joinZWNJ:   stateFAIL,
+		joinZWJ:    stateFAIL,
+		joinVirama: stateAfter, // no-op as we can't accept joiners here
+	},
+	stateFAIL: {
+		0:          stateFAIL,
+		joiningL:   stateFAIL,
+		joiningD:   stateFAIL,
+		joiningT:   stateFAIL,
+		joiningR:   stateFAIL,
+		joinZWNJ:   stateFAIL,
+		joinZWJ:    stateFAIL,
+		joinVirama: stateFAIL,
+	},
+}
+
+// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
+// already implicitly satisfied by the overall implementation.
+func (p *Profile) validateLabel(s string) (err error) {
+	if s == "" {
+		if p.verifyDNSLength {
+			return &labelError{s, "A4"}
+		}
+		return nil
+	}
+	if !p.validateLabels {
+		return nil
+	}
+	trie := p.trie // p.validateLabels is only set if trie is set.
+	if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+		return &labelError{s, "V2"}
+	}
+	if s[0] == '-' || s[len(s)-1] == '-' {
+		return &labelError{s, "V3"}
+	}
+	// TODO: merge the use of this in the trie.
+	v, sz := trie.lookupString(s)
+	x := info(v)
+	if x.isModifier() {
+		return &labelError{s, "V5"}
+	}
+	// Quickly return in the absence of zero-width (non) joiners.
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 00000000000..02c7d59af3b --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
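+//
+// A small round-trip sketch (editorial illustration, reusing the "bücher"
+// example from the ToASCII documentation):
+//
+//	a, _ := encode("xn--", "bücher") // "xn--bcher-kva"
+//	u, _ := decode("bcher-kva")      // "bücher"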
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 00000000000..f910b269144 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
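+//
+// Editorial note: ASCII input never consults the index table; lookupString
+// returns idnaValues[c0] with size 1 directly, while multi-byte UTF-8 walks
+// idnaIndex one continuation byte at a time before the final lookupValue call.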
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 00000000000..c4ef847e7a3 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. 
+func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 00000000000..7a8cf889b5b --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. 
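The sparse-block lookup above is the heart of the table encoding: each block is a header entry {value: stride, lo: n} followed by n sorted byte ranges, and a byte b that falls in range r decodes to r.value + (b - r.lo)*stride. Below is a minimal, self-contained sketch of that arithmetic over a hypothetical two-range block; the data is illustrative only, not taken from the generated idnaSparse tables.

package main

import "fmt"

// valueRange mirrors the sparse-table entry above: the first entry of a block
// is a header whose value holds the stride and whose lo holds the range
// count; the entries after it are sorted, inclusive byte ranges.
type valueRange struct {
	value  uint16
	lo, hi byte
}

// sparseLookup binary-searches the n ranges following the header at offset,
// the same way (*sparseBlocks).lookup does.
func sparseLookup(values []valueRange, offset uint16, b byte) uint16 {
	header := values[offset]
	lo := offset + 1
	hi := lo + uint16(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0 // b is not covered by any range in the block
}

func main() {
	// Toy block at offset 0: header {stride: 0, 2 ranges}, then the ranges.
	toy := []valueRange{
		{value: 0x0000, lo: 0x02},
		{value: 0x0008, lo: 0x80, hi: 0x9f},
		{value: 0x0040, lo: 0xa0, hi: 0xbf},
	}
	fmt.Printf("0x%04x\n", sparseLookup(toy, 0, 0x85)) // 0x0008
	fmt.Printf("0x%04x\n", sparseLookup(toy, 0, 0xb0)) // 0x0040
}

With a stride of zero, as in the toy header, every byte in a range maps to the same value, which matches the common {value: 0x0008, lo: 0x80, hi: 0xbf}-style entries that dominate the generated data above.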
+ + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000000..685f0e7ea23 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. 
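The per-rune format documented in trieval.go packs category, joining type, and attribute bits into one 16-bit value. The standalone sketch below re-creates just the masks needed to decode an unmapped rune; the sample value 0x0108 is an assumption chosen for illustration, not a value looked up from the generated tables.

package main

import "fmt"

// Masks copied from the constants above; only what an unmapped rune needs.
const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	joinShift    = 8
	joinMask     = 0x07
)

type info uint16

func (c info) isMapped() bool { return c&0x3 != 0 }

// category returns the small (2-bit) category when set, else the big one.
func (c info) category() uint16 {
	if small := uint16(c) & catSmallMask; small != 0 {
		return small
	}
	return uint16(c) & catBigMask
}

func (c info) joinType() info {
	if c.isMapped() {
		return 0 // bits 10..8 mean something else for mapped runes
	}
	return (c >> joinShift) & joinMask
}

func main() {
	v := info(0x0108) // hypothetical trie value: category 0x08, join type 1
	fmt.Println(v.isMapped())            // false: bits 1..0 are zero
	fmt.Printf("0x%02x\n", v.category()) // 0x08, i.e. the "valid" category
	fmt.Println(v.joinType())            // 1, i.e. joiningL
}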
+type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
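+// The three branches below handle (1) an observation newer than the current
+// pending bucket, which flushes pending and starts a new one, (2) an
+// observation within one small-bucket width of pendingTime, which is folded
+// into pending, and (3) anything older, which is merged directly into every
+// level. With illustrative values levels[0].size = 1s and
+// pendingTime = 10:00:05:
+//
+//	t = 10:00:06.0 → advance, then start a new pending bucket
+//	t = 10:00:04.5 → added to pending (within one bucket width)
+//	t = 10:00:03.0 → mergeValue into the historical buckets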
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
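+// For example (illustrative): LatestBuckets(0, 3) on a series whose level 0
+// holds one-second buckets returns copies of the three most recent one-second
+// buckets, newest first.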
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
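+	// Worked example (illustrative values): with srcStart = 10:00:00,
+	// dstStart = 10:01:30 and srcInterval = 1m, advance = 90s/60s = 1
+	// (integer division), so scanning begins at srcIndex 1 with
+	// srcStart = 10:01:00; the partial overlap with the first destination
+	// bucket is then handled by the fraction computation below.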
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
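+// Typical usage (illustrative sketch, not part of the upstream source):
+//
+//	ts := NewTimeSeries(NewFloat)
+//	f := Float(1)
+//	ts.Add(&f)
+//	sum := ts.Recent(10 * time.Second) // Observable covering the last 10s
+//	fmt.Println(sum.(*Float).Value())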
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 00000000000..20f2b8940ba --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httplex + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. 
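+	// For example, trimOWS(" \t gzip \t") returns "gzip"; combined with the
+	// comma splitting in headerValueContainsToken below,
+	// HeaderValuesContainsToken([]string{"GZIP , chunked"}, "gzip") reports
+	// true, because token comparison is ASCII case-insensitive.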
+ for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. + return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. 
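+// For example (illustrative): ValidHostHeader("example.com:8080") is true,
+// while ValidHostHeader("exa mple.com") is false, since a space is not a
+// valid host byte in the table below.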
+var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. 
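+	// For example (illustrative): PunycodeHostPort("bücher.example:8080")
+	// returns "xn--bcher-kva.example:8080", while pure-ASCII input such as
+	// "example.com:80" is returned unchanged by the fast path above.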
+ return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000000..c646a6952e5 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. 
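+	// A typical lifecycle (illustrative sketch, not part of the upstream
+	// source) for a long-lived object such as a connection:
+	//
+	//	el := trace.NewEventLog("mypkg.Conn", "remote=10.0.0.1:5000")
+	//	el.Printf("handshake complete")
+	//	el.Errorf("read failed: %v", io.EOF)
+	//	el.Finish()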
+ Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
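+// The freelist is a buffered channel used as a simple pool: a non-blocking
+// receive in newEventLog reuses an entry when one is available, and the
+// non-blocking send below drops the entry when the pool is full. The same
+// pattern in miniature (illustrative sketch):
+//
+//	pool := make(chan *eventLog, 8)
+//	select {
+//	case pool <- el: // recycled for a later newEventLog
+//	default: // pool full; let the GC reclaim el
+//	}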
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +
+<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000000..9bf4286c794 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
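+// Bucket boundaries are powers of two; a few worked values of the helpers
+// above (illustrative):
+//
+//	getBucket(1)    == 0 // bucket [0,2)
+//	getBucket(4)    == 2 // bucket [4,8)
+//	getBucket(1000) == 9 // log2(1000) == 10, bucket [512,1024)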
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+	<td style="padding:0.25em">Count: {{.Count}}</td>
+	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+	<td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+<tr>
+	<td style="padding:0 0.25em">[{{.Lower}},</td>
+	<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+	<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+	<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+	<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+	<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+</tr>
+{{end}}
+{{end}}
+</table>
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000000..bb72a527e8c --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1082 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. 
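+	// AuthRequest (declared above) may be replaced to change who can view
+	// these pages; an illustrative override that admits any viewer but
+	// hides sensitive events:
+	//
+	//	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+	//		return true, false
+	//	}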
+ http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) 
+} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. 
+ // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. 
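+// newFamily above instantiates this with minCond latency thresholds and
+// errorCond; a custom condition would be just another implementation
+// (illustrative sketch, assuming a strings import):
+//
+//	type titleCond string
+//
+//	func (c titleCond) match(t *trace) bool { return strings.Contains(t.Title, string(c)) }
+//	func (c titleCond) String() string      { return "title contains " + string(c) }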
+type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. + Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + maxEvents int + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. 
+// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. 
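Stepping back from the internals for a moment: everything above is driven by a small exported surface (`New`, `LazyPrintf`, `SetError`, `Finish`). A minimal caller-side sketch, assuming a hypothetical `fetch` helper and illustrative family/path names; the free-list allocator continues below:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

// fetch stands in for some hypothetical unit of work.
func fetch(ctx context.Context, n int) error { return nil }

func handleFetch(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("myapp.Fetch", req.URL.Path) // family, title
	defer tr.Finish()                            // files the trace into this family's buckets

	tr.LazyPrintf("fetching %d items", 10)
	if err := fetch(req.Context(), 10); err != nil {
		tr.LazyPrintf("fetch failed: %v", err)
		tr.SetError() // additionally routes the trace into the errors bucket
	}
}

func main() {
	http.HandleFunc("/fetch", handleFetch)
	// The package serves /debug/requests on the default mux.
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```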
+func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td {{if $empty}}class="empty"{{end}}>
+			{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{.Cond}}]
+			{{if not $empty}}</a>{{end}}
+		</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td class="latency-first"><a href="?fam={{$fam}}&b={{$nb}}">[minute]</a></td>
+		<td><a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a></td>
+		<td><a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a></td>
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+  [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+  [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+		{{/* TODO: include traceID/spanID */}}
+	</tr>
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 00000000000..d6081911853 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 00000000000..df6e1fba7ca --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000000..15167cd746c --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000000..46aa2b12dda --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. 
+
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 00000000000..1c4577e9680
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 00000000000..6a66aea5eaf
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 00000000000..eb8dcee179e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,77 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. 
This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+```go
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	newappengine "google.golang.org/appengine"
+	newurlfetch "google.golang.org/appengine/urlfetch"
+
+	"appengine"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	var c appengine.Context = appengine.NewContext(r)
+	c.Infof("Logging a message with the old package")
+
+	var ctx context.Context = newappengine.NewContext(r)
+	client := &http.Client{
+		Transport: &oauth2.Transport{
+			Source: google.AppEngineTokenSource(ctx, "scope"),
+			Base:   &newurlfetch.Transport{Context: ctx},
+		},
+	}
+	client.Get("...")
+}
+```
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 00000000000..50d918b8788
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+)
+
+// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex.
+var appengineFlex bool
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineAppIDFunc func(c context.Context) string
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &appEngineTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 00000000000..56669eaa98d --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go new file mode 100644 index 00000000000..5d0231af2dd --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +func init() { + appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000000..b4b62745c45 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultCredentials holds "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. 
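A consumer-side sketch of the helper documented above (its body follows); the scope and storage URL here are illustrative placeholders, not prescriptive values:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Credentials are resolved via the lookup chain described in
	// FindDefaultCredentials: env var, gcloud file, GAE, then GCE metadata.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b/some-bucket/o")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```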
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scope); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineFlex { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scope...), + }, nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var f credentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: b, + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 00000000000..66a8b0e1812 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,202 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. +// +// For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +package google // import "golang.org/x/oauth2/google" + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. 
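A hedged sketch of the web flow this function (defined just below) enables; the file name, scope, and state string are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	b, err := ioutil.ReadFile("client_credentials.json")
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/calendar.readonly")
	if err != nil {
		log.Fatal(err)
	}

	// Send the user to the consent page, then trade the returned code for a token.
	fmt.Println("Visit:", conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
	var code string
	fmt.Scan(&code)
	tok, err := conf.Exchange(context.Background(), code)
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background(), tok)
	_ = client // use the authorized *http.Client
}
```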
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 00000000000..b0fdb3a888a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000000..bdc18084b1f --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := internal.ParseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It returns the current access token stored in the credentials,
+// and refreshes it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
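Putting `SDKConfig` together, a short usage sketch; an empty account name selects whatever account is active in the gcloud properties file (the path-guessing helper follows below):

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	conf, err := google.NewSDKConfig("") // "" = the account active in gcloud properties
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background())
	_ = client // authorized with the SDK account's cached, auto-refreshing token
}
```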
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	// Else, fall back to user.Current:
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 00000000000..03265e888af
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 00000000000..6978192a99c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": {}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+func CondVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 00000000000..cf959ea69f6
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,250 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://login.windows.net", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", + "https://sandbox.codeswholesale.com/oauth/token", + "https://api.sipgate.com/v1/authorization/oauth", +} + +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. +var brokenAuthHeaderDomains = []string{ + ".force.com", + ".myshopify.com", + ".okta.com", + ".oktapreview.com", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. 
+ return true +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + r, err := ctxhttp.Do(ctx, hc, req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000000..783bd98c8b4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,68 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. 
If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+	contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
+	if ctx != nil {
+		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+			return hc, nil
+		}
+	}
+	for _, fn := range contextClientFuncs {
+		c, err := fn(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			return c, nil
+		}
+	}
+	return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+	hc, err := ContextClient(ctx)
+	// This is a rare error case (somebody using nil on App Engine).
+	if err != nil {
+		return ErrorTransport{err}
+	}
+	return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	return nil, t.Err
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 00000000000..683d2d271a3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`             // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This array is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. 
+func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 00000000000..e016db42178 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,159 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. 
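Before the jwtSource implementation, a usage sketch of the two-legged flow. All field values below are placeholders; PrivateKey must hold a real PEM-encoded RSA key for the exchange to succeed:

    package main

    import (
        "context"
        "log"

        "golang.org/x/oauth2/jwt"
    )

    func main() {
        conf := &jwt.Config{
            Email:      "svc@example.com",                            // placeholder
            PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----\n..."), // placeholder PEM
            Scopes:     []string{"https://example.com/auth/read"},
            TokenURL:   "https://provider.example.com/token", // placeholder
        }
        // Client returns an *http.Client whose Transport injects bearer
        // tokens minted by signing JWT assertions with PrivateKey.
        client := conf.Client(context.Background())
        if _, err := client.Get("https://api.example.com/resource"); err != nil {
            log.Print(err)
        }
    }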
+type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000000..4bafe873d0d --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,344 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. 
+var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. 
You must +// always provide a non-zero string and validate that it matches the +// state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken.
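A compact three-legged sketch tying AuthCodeURL and Exchange together; the endpoints, credentials and the fixed state value are placeholders (real code should generate and persist a per-session state):

    package main

    import (
        "fmt"
        "net/http"

        "golang.org/x/oauth2"
    )

    var conf = &oauth2.Config{
        ClientID:     "client-id",     // placeholder
        ClientSecret: "client-secret", // placeholder
        RedirectURL:  "https://example.com/callback",
        Scopes:       []string{"read"},
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://provider.example.com/oauth/authorize",
            TokenURL: "https://provider.example.com/oauth/token",
        },
    }

    func login(w http.ResponseWriter, r *http.Request) {
        // AccessTypeOffline asks the provider for a refresh token.
        http.Redirect(w, r, conf.AuthCodeURL("state-123", oauth2.AccessTypeOffline), http.StatusFound)
    }

    func callback(w http.ResponseWriter, r *http.Request) {
        if r.FormValue("state") != "state-123" { // validate before Exchange
            http.Error(w, "state mismatch", http.StatusBadRequest)
            return
        }
        tok, err := conf.Exchange(r.Context(), r.FormValue("code"))
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadGateway)
            return
        }
        fmt.Fprintln(w, "token type:", tok.Type())
    }

    func main() {
        http.HandleFunc("/login", login)
        http.HandleFunc("/callback", callback)
        http.ListenAndServe(":8080", nil)
    }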
+type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{Err: err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. 
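StaticTokenSource combined with NewClient is the usual pattern for APIs that issue long-lived personal tokens; a minimal sketch with a placeholder token and URL:

    package main

    import (
        "context"
        "fmt"
        "log"

        "golang.org/x/oauth2"
    )

    func main() {
        // The token never expires from the client's point of view, so the
        // static source is never asked to refresh it.
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-api-token"})
        client := oauth2.NewClient(context.Background(), ts)
        resp, err := client.Get("https://api.example.com/user") // placeholder URL
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }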
+// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 00000000000..7a3167f15b0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how much earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows.
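ReuseTokenSource, defined above, is what makes disk-cached tokens cheap to use; a sketch with a hypothetical loadCached helper (the file name and the empty Config are illustrative):

    package main

    import (
        "context"
        "encoding/json"
        "os"

        "golang.org/x/oauth2"
    )

    // loadCached reads a token saved by a previous run; it returns nil when
    // no usable cache exists, which ReuseTokenSource accepts.
    func loadCached(path string) *oauth2.Token {
        f, err := os.Open(path)
        if err != nil {
            return nil
        }
        defer f.Close()
        t := new(oauth2.Token)
        if json.NewDecoder(f).Decode(t) != nil {
            return nil
        }
        return t
    }

    func main() {
        conf := &oauth2.Config{ /* credentials and Endpoint elided */ }
        cached := loadCached("token.json")
        // The cached token is served for as long as it is valid; only once
        // it expires is conf's refresher asked for a new one.
        ts := oauth2.ReuseTokenSource(cached, conf.TokenSource(context.Background(), cached))
        _ = ts
    }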
+func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 00000000000..92ac7e2531f --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Source. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or the token is expired, +// it tries to refresh/fetch a new token.
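WithExtra and Extra carry provider-specific response fields without widening the Token struct; a minimal sketch with a placeholder id_token value:

    package main

    import (
        "fmt"

        "golang.org/x/oauth2"
    )

    func main() {
        // Providers often return nonstandard fields, such as an OpenID
        // Connect id_token; Extra exposes them by key.
        tok := new(oauth2.Token).WithExtra(map[string]interface{}{
            "id_token": "eyJhbGciOi...", // truncated placeholder
        })
        if v, ok := tok.Extra("id_token").(string); ok {
            fmt.Println("id_token:", v)
        }
    }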
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/PATENTS b/vendor/golang.org/x/text/PATENTS new file mode 100644 index 00000000000..733099041f8 --- /dev/null +++ b/vendor/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule.go b/vendor/golang.org/x/text/secure/bidirule/bidirule.go new file mode 100644 index 00000000000..0e6b85824b7 --- /dev/null +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule.go @@ -0,0 +1,340 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bidirule implements the Bidi Rule defined by RFC 5893. +// +// This package is under development. The API may change without notice and +// without preserving backward compatibility. +package bidirule + +import ( + "errors" + "unicode/utf8" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/bidi" +) + +// This file contains an implementation of RFC 5893: Right-to-Left Scripts for +// Internationalized Domain Names for Applications (IDNA) +// +// A label is an individual component of a domain name. Labels are usually +// shown separated by dots; for example, the domain name "www.example.com" is +// composed of three labels: "www", "example", and "com". +// +// An RTL label is a label that contains at least one character of class R, AL, +// or AN. An LTR label is any label that is not an RTL label. +// +// A "Bidi domain name" is a domain name that contains at least one RTL label. 
+// +// The following guarantees can be made based on the above: +// +// o In a domain name consisting of only labels that satisfy the rule, +// the requirements of Section 3 are satisfied. Note that even LTR +// labels and pure ASCII labels have to be tested. +// +// o In a domain name consisting of only LDH labels (as defined in the +// Definitions document [RFC5890]) and labels that satisfy the rule, +// the requirements of Section 3 are satisfied as long as a label +// that starts with an ASCII digit does not come after a +// right-to-left label. +// +// No guarantee is given for other combinations. + +// ErrInvalid indicates a label is invalid according to the Bidi Rule. +var ErrInvalid = errors.New("bidirule: failed Bidi Rule") + +type ruleState uint8 + +const ( + ruleInitial ruleState = iota + ruleLTR + ruleLTRFinal + ruleRTL + ruleRTLFinal + ruleInvalid +) + +type ruleTransition struct { + next ruleState + mask uint16 +} + +var transitions = [...][2]ruleTransition{ + // [2.1] The first character must be a character with Bidi property L, R, or + // AL. If it has the R or AL property, it is an RTL label; if it has the L + // property, it is an LTR label. + ruleInitial: { + {ruleLTRFinal, 1 << bidi.L}, + {ruleRTLFinal, 1< 0 bytes returned + // before considering the error". + if r.src0 != r.src1 || r.err != nil { + r.dst0 = 0 + r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF) + r.src0 += n + + switch { + case err == nil: + if r.src0 != r.src1 { + r.err = errInconsistentByteCount + } + // The Transform call was successful; we are complete if we + // cannot read more bytes into src. + r.transformComplete = r.err != nil + continue + case err == ErrShortDst && (r.dst1 != 0 || n != 0): + // Make room in dst by copying out, and try again. + continue + case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil: + // Read more bytes into src via the code below, and try again. + default: + r.transformComplete = true + // The reader error (r.err) takes precedence over the + // transformer error (err) unless r.err is nil or io.EOF. + if r.err == nil || r.err == io.EOF { + r.err = err + } + continue + } + } + + // Move any untransformed source bytes to the start of the buffer + // and read more bytes. + if r.src0 != 0 { + r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1]) + } + n, r.err = r.r.Read(r.src[r.src1:]) + r.src1 += n + } +} + +// TODO: implement ReadByte (and ReadRune??). + +// Writer wraps another io.Writer by transforming the bytes read. +// The user needs to call Close to flush unwritten bytes that may +// be buffered. +type Writer struct { + w io.Writer + t Transformer + dst []byte + + // src[:n] contains bytes that have not yet passed through t. + src []byte + n int +} + +// NewWriter returns a new Writer that wraps w by transforming the bytes written +// via t. It calls Reset on t. +func NewWriter(w io.Writer, t Transformer) *Writer { + t.Reset() + return &Writer{ + w: w, + t: t, + dst: make([]byte, defaultBufSize), + src: make([]byte, defaultBufSize), + } +} + +// Write implements the io.Writer interface. If there are not enough +// bytes available to complete a Transform, the bytes will be buffered +// for the next write. Call Close to convert the remaining bytes. +func (w *Writer) Write(data []byte) (n int, err error) { + src := data + if w.n > 0 { + // Append bytes from data to the last remainder. + // TODO: limit the amount copied on first try. 
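The Reader and Writer machinery here is driven entirely through the Transformer interface; a toy Transformer illustrating the dst/src/ErrShortDst bookkeeping, fed through transform.NewReader (upperASCII is invented for the example):

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"

        "golang.org/x/text/transform"
    )

    // upperASCII uppercases ASCII letters byte by byte, reporting
    // ErrShortDst when the destination buffer fills up.
    type upperASCII struct{ transform.NopResetter }

    func (upperASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
        for nSrc < len(src) {
            if nDst == len(dst) {
                return nDst, nSrc, transform.ErrShortDst
            }
            c := src[nSrc]
            if 'a' <= c && c <= 'z' {
                c -= 'a' - 'A'
            }
            dst[nDst] = c
            nDst++
            nSrc++
        }
        return nDst, nSrc, nil
    }

    func main() {
        r := transform.NewReader(strings.NewReader("hello, world"), upperASCII{})
        b, _ := ioutil.ReadAll(r)
        fmt.Println(string(b)) // HELLO, WORLD
    }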
+ n = copy(w.src[w.n:], data) + w.n += n + src = w.src[:w.n] + } + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, false) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return n, werr + } + src = src[nSrc:] + if w.n == 0 { + n += nSrc + } else if len(src) <= n { + // Enough bytes from w.src have been consumed. We make src point + // to data instead to reduce the copying. + w.n = 0 + n -= len(src) + src = data[n:] + if n < len(data) && (err == nil || err == ErrShortSrc) { + continue + } + } + switch err { + case ErrShortDst: + // This error is okay as long as we are making progress. + if nDst > 0 || nSrc > 0 { + continue + } + case ErrShortSrc: + if len(src) < len(w.src) { + m := copy(w.src, src) + // If w.n > 0, bytes from data were already copied to w.src and n + // was already set to the number of bytes consumed. + if w.n == 0 { + n += m + } + w.n = m + err = nil + } else if nDst > 0 || nSrc > 0 { + // Not enough buffer to store the remainder. Keep processing as + // long as there is progress. Without this case, transforms that + // require a lookahead larger than the buffer may result in an + // error. This is not something one may expect to be common in + // practice, but it may occur when buffers are set to small + // sizes during testing. + continue + } + case nil: + if w.n > 0 { + err = errInconsistentByteCount + } + } + return n, err + } +} + +// Close implements the io.Closer interface. +func (w *Writer) Close() error { + src := w.src[:w.n] + for { + nDst, nSrc, err := w.t.Transform(w.dst, src, true) + if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { + return werr + } + if err != ErrShortDst { + return err + } + src = src[nSrc:] + } +} + +type nop struct{ NopResetter } + +func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := copy(dst, src) + if n < len(src) { + err = ErrShortDst + } + return n, n, err +} + +func (nop) Span(src []byte, atEOF bool) (n int, err error) { + return len(src), nil +} + +type discard struct{ NopResetter } + +func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return 0, len(src), nil +} + +var ( + // Discard is a Transformer for which all Transform calls succeed + // by consuming all bytes and writing nothing. + Discard Transformer = discard{} + + // Nop is a SpanningTransformer that copies src to dst. + Nop SpanningTransformer = nop{} +) + +// chain is a sequence of links. A chain with N Transformers has N+1 links and +// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst +// buffers given to chain.Transform and the middle N-1 buffers are intermediate +// buffers owned by the chain. The i'th link transforms bytes from the i'th +// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer +// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N). +type chain struct { + link []link + err error + // errStart is the index at which the error occurred plus 1. Processing + // errStart at this level at the next call to Transform. As long as + // errStart > 0, chain will not consume any more source bytes. + errStart int +} + +func (c *chain) fatalError(errIndex int, err error) { + if i := errIndex + 1; i > c.errStart { + c.errStart = i + c.err = err + } +} + +type link struct { + t Transformer + // b[p:n] holds the bytes to be transformed by t. 
+ b []byte + p int + n int +} + +func (l *link) src() []byte { + return l.b[l.p:l.n] +} + +func (l *link) dst() []byte { + return l.b[l.n:] +} + +// Chain returns a Transformer that applies t in sequence. +func Chain(t ...Transformer) Transformer { + if len(t) == 0 { + return nop{} + } + c := &chain{link: make([]link, len(t)+1)} + for i, tt := range t { + c.link[i].t = tt + } + // Allocate intermediate buffers. + b := make([][defaultBufSize]byte, len(t)-1) + for i := range b { + c.link[i+1].b = b[i][:] + } + return c +} + +// Reset resets the state of Chain. It calls Reset on all the Transformers. +func (c *chain) Reset() { + for i, l := range c.link { + if l.t != nil { + l.t.Reset() + } + c.link[i].p, c.link[i].n = 0, 0 + } +} + +// TODO: make chain use Span (is going to be fun to implement!) + +// Transform applies the transformers of c in sequence. +func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // Set up src and dst in the chain. + srcL := &c.link[0] + dstL := &c.link[len(c.link)-1] + srcL.b, srcL.p, srcL.n = src, 0, len(src) + dstL.b, dstL.n = dst, 0 + var lastFull, needProgress bool // for detecting progress + + // i is the index of the next Transformer to apply, for i in [low, high]. + // low is the lowest index for which c.link[low] may still produce bytes. + // high is the highest index for which c.link[high] has a Transformer. + // The error returned by Transform determines whether to increase or + // decrease i. We try to completely fill a buffer before converting it. + for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { + in, out := &c.link[i], &c.link[i+1] + nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) + out.n += nDst + in.p += nSrc + if i > 0 && in.p == in.n { + in.p, in.n = 0, 0 + } + needProgress, lastFull = lastFull, false + switch err0 { + case ErrShortDst: + // Process the destination buffer next. Return if we are already + // at the high index. + if i == high { + return dstL.n, srcL.p, ErrShortDst + } + if out.n != 0 { + i++ + // If the Transformer at the next index is not able to process any + // source bytes there is nothing that can be done to make progress + // and the bytes will remain unprocessed. lastFull is used to + // detect this and break out of the loop with a fatal error. + lastFull = true + continue + } + // The destination buffer was too small, but is completely empty. + // Return a fatal error as this transformation can never complete. + c.fatalError(i, errShortInternal) + case ErrShortSrc: + if i == 0 { + // Save ErrShortSrc in err. All other errors take precedence. + err = ErrShortSrc + break + } + // Source bytes were depleted before filling up the destination buffer. + // Verify we made some progress, move the remaining bytes to the errStart + // and try to get more source bytes. + if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { + // There were not enough source bytes to proceed while the source + // buffer cannot hold any more bytes. Return a fatal error as this + // transformation can never complete. + c.fatalError(i, errShortInternal) + break + } + // in.b is an internal buffer and we can make progress. + in.p, in.n = 0, copy(in.b, in.src()) + fallthrough + case nil: + // if i == low, we have depleted the bytes at index i or any lower levels. + // In that case we increase low and i. In all other cases we decrease i to + // fetch more bytes before proceeding to the next index. 
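Chain is most often seen composing normalization with filtering; a sketch assuming the sibling x/text/runes and x/text/unicode/norm packages are available:

    package main

    import (
        "fmt"
        "unicode"

        "golang.org/x/text/runes"
        "golang.org/x/text/transform"
        "golang.org/x/text/unicode/norm"
    )

    func main() {
        // A classic three-link chain: decompose, strip combining marks,
        // recompose. The intermediate buffers are owned by chain.Transform.
        t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
        s, _, err := transform.String(t, "résumé façade")
        fmt.Println(s, err) // resume facade <nil>
    }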
+ if i > low { + i-- + continue + } + default: + c.fatalError(i, err0) + } + // Exhausted level low or fatal error: increase low and continue + // to process the bytes accepted so far. + i++ + low = i + } + + // If c.errStart > 0, this means we found a fatal error. We will clear + // all upstream buffers. At this point, no more progress can be made + // downstream, as Transform would have bailed while handling ErrShortDst. + if c.errStart > 0 { + for i := 1; i < c.errStart; i++ { + c.link[i].p, c.link[i].n = 0, 0 + } + err, c.errStart, c.err = c.err, 0, nil + } + return dstL.n, srcL.p, err +} + +// Deprecated: use runes.Remove instead. +func RemoveFunc(f func(r rune) bool) Transformer { + return removeF(f) +} + +type removeF func(r rune) bool + +func (removeF) Reset() {} + +// Transform implements the Transformer interface. +func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { + + if r = rune(src[0]); r < utf8.RuneSelf { + sz = 1 + } else { + r, sz = utf8.DecodeRune(src) + + if sz == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src) { + err = ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(r) { + if nDst+3 > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], "\uFFFD") + } + nSrc++ + continue + } + } + + if !t(r) { + if nDst+sz > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], src[:sz]) + } + nSrc += sz + } + return +} + +// grow returns a new []byte that is longer than b, and copies the first n bytes +// of b to the start of the new slice. +func grow(b []byte, n int) []byte { + m := len(b) + if m <= 32 { + m = 64 + } else if m <= 256 { + m *= 2 + } else { + m += m >> 1 + } + buf := make([]byte, m) + copy(buf, b[:n]) + return buf +} + +const initialBufSize = 128 + +// String returns a string with the result of converting s[:n] using t, where +// n <= len(s). If err == nil, n will be len(s). It calls Reset on t. +func String(t Transformer, s string) (result string, n int, err error) { + t.Reset() + if s == "" { + // Fast path for the common case for empty input. Results in about a + // 86% reduction of running time for BenchmarkStringLowerEmpty. + if _, _, err := t.Transform(nil, nil, true); err == nil { + return "", 0, nil + } + } + + // Allocate only once. Note that both dst and src escape when passed to + // Transform. + buf := [2 * initialBufSize]byte{} + dst := buf[:initialBufSize:initialBufSize] + src := buf[initialBufSize : 2*initialBufSize] + + // The input string s is transformed in multiple chunks (starting with a + // chunk size of initialBufSize). nDst and nSrc are per-chunk (or + // per-Transform-call) indexes, pDst and pSrc are overall indexes. + nDst, nSrc := 0, 0 + pDst, pSrc := 0, 0 + + // pPrefix is the length of a common prefix: the first pPrefix bytes of the + // result will equal the first pPrefix bytes of s. It is not guaranteed to + // be the largest such value, but if pPrefix, len(result) and len(s) are + // all equal after the final transform (i.e. calling Transform with atEOF + // being true returned nil error) then we don't need to allocate a new + // result string. + pPrefix := 0 + for { + // Invariant: pDst == pPrefix && pSrc == pPrefix. 
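The deprecated RemoveFunc still works and pairs naturally with String; a minimal sketch dropping whitespace:

    package main

    import (
        "fmt"
        "unicode"

        "golang.org/x/text/transform"
    )

    func main() {
        // String transforms a whole string at once and reports how many
        // input bytes were consumed.
        t := transform.RemoveFunc(unicode.IsSpace)
        s, n, err := transform.String(t, "a b c")
        fmt.Println(s, n, err) // abc 5 <nil>
    }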
+ + n := copy(src, s[pSrc:]) + nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // TODO: let transformers implement an optional Spanner interface, akin + // to norm's QuickSpan. This would even allow us to avoid any allocation. + if !bytes.Equal(dst[:nDst], src[:nSrc]) { + break + } + pPrefix = pSrc + if err == ErrShortDst { + // A buffer can only be short if a transformer modifies its input. + break + } else if err == ErrShortSrc { + if nSrc == 0 { + // No progress was made. + break + } + // Equal so far and !atEOF, so continue checking. + } else if err != nil || pPrefix == len(s) { + return string(s[:pPrefix]), pPrefix, err + } + } + // Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc. + + // We have transformed the first pSrc bytes of the input s to become pDst + // transformed bytes. Those transformed bytes are discontiguous: the first + // pPrefix of them equal s[:pPrefix] and the last nDst of them equal + // dst[:nDst]. We copy them around, into a new dst buffer if necessary, so + // that they become one contiguous slice: dst[:pDst]. + if pPrefix != 0 { + newDst := dst + if pDst > len(newDst) { + newDst = make([]byte, len(s)+nDst-nSrc) + } + copy(newDst[pPrefix:pDst], dst[:nDst]) + copy(newDst[:pPrefix], s[:pPrefix]) + dst = newDst + } + + // Prevent duplicate Transform calls with atEOF being true at the end of + // the input. Also return if we have an unrecoverable error. + if (err == nil && pSrc == len(s)) || + (err != nil && err != ErrShortDst && err != ErrShortSrc) { + return string(dst[:pDst]), pSrc, err + } + + // Transform the remaining input, growing dst and src buffers as necessary. + for { + n := copy(src, s[pSrc:]) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + pDst += nDst + pSrc += nSrc + + // If we got ErrShortDst or ErrShortSrc, do not grow as long as we can + // make progress. This may avoid excessive allocations. + if err == ErrShortDst { + if nDst == 0 { + dst = grow(dst, pDst) + } + } else if err == ErrShortSrc { + if nSrc == 0 { + src = grow(src, 0) + } + } else if err != nil || pSrc == len(s) { + return string(dst[:pDst]), pSrc, err + } + } +} + +// Bytes returns a new byte slice with the result of converting b[:n] using t, +// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t. +func Bytes(t Transformer, b []byte) (result []byte, n int, err error) { + return doAppend(t, 0, make([]byte, len(b)), b) +} + +// Append appends the result of converting src[:n] using t to dst, where +// n <= len(src), If err == nil, n will be len(src). It calls Reset on t. +func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) { + if len(dst) == cap(dst) { + n := len(src) + len(dst) // It is okay for this to be 0. + b := make([]byte, n) + dst = b[:copy(b, dst)] + } + return doAppend(t, len(dst), dst[:cap(dst)], src) +} + +func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) { + t.Reset() + pSrc := 0 + for { + nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true) + pDst += nDst + pSrc += nSrc + if err != ErrShortDst { + return dst[:pDst], pSrc, err + } + + // Grow the destination buffer, but do not grow as long as we can make + // progress. This may avoid excessive allocations. 
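Append writes the transformed source onto an existing slice, growing it only when the destination runs short; a minimal sketch using the Nop transformer:

    package main

    import (
        "fmt"

        "golang.org/x/text/transform"
    )

    func main() {
        // Nop just copies, so this is effectively an append of the
        // transformed payload onto dst.
        dst := []byte("prefix: ")
        out, n, err := transform.Append(transform.Nop, dst, []byte("payload"))
        fmt.Println(string(out), n, err) // prefix: payload 7 <nil>
    }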
+ if nDst == 0 { + dst = grow(dst, pDst) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go new file mode 100644 index 00000000000..3fc4a62521a --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -0,0 +1,198 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go gen_ranges.go + +// Package bidi contains functionality for bidirectional text support. +// +// See http://www.unicode.org/reports/tr9. +// +// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways +// and without notice. +package bidi // import "golang.org/x/text/unicode/bidi" + +// TODO: +// The following functionality would not be hard to implement, but hinges on +// the definition of a Segmenter interface. For now this is up to the user. +// - Iterate over paragraphs +// - Segmenter to iterate over runs directly from a given text. +// Also: +// - Transformer for reordering? +// - Transformer (validator, really) for Bidi Rule. + +// This API tries to avoid dealing with embedding levels for now. Under the hood +// these will be computed, but the question is to which extent the user should +// know they exist. We should at some point allow the user to specify an +// embedding hierarchy, though. + +// A Direction indicates the overall flow of text. +type Direction int + +const ( + // LeftToRight indicates the text contains no right-to-left characters and + // that either there are some left-to-right characters or the option + // DefaultDirection(LeftToRight) was passed. + LeftToRight Direction = iota + + // RightToLeft indicates the text contains no left-to-right characters and + // that either there are some right-to-left characters or the option + // DefaultDirection(RightToLeft) was passed. + RightToLeft + + // Mixed indicates text contains both left-to-right and right-to-left + // characters. + Mixed + + // Neutral means that text contains no left-to-right and right-to-left + // characters and that no default direction has been set. + Neutral +) + +type options struct{} + +// An Option is an option for Bidi processing. +type Option func(*options) + +// ICU allows the user to define embedding levels. This may be used, for example, +// to use hierarchical structure of markup languages to define embeddings. +// The following option may be a way to expose this functionality in this API. +// // LevelFunc sets a function that associates nesting levels with the given text. +// // The levels function will be called with monotonically increasing values for p. +// func LevelFunc(levels func(p int) int) Option { +// panic("unimplemented") +// } + +// DefaultDirection sets the default direction for a Paragraph. The direction is +// overridden if the text contains directional characters. +func DefaultDirection(d Direction) Option { + panic("unimplemented") +} + +// A Paragraph holds a single Paragraph for Bidi processing. +type Paragraph struct { + // buffers +} + +// SetBytes configures p for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If b contains a paragraph separator +// it will only process the first paragraph and report the number of bytes +// consumed from b including this separator. Error may be non-nil if options are +// given. 
+func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) { + panic("unimplemented") +} + +// SetString configures p for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If s contains a paragraph separator +// it will only process the first paragraph and report the number of bytes +// consumed from s including this separator. Error may be non-nil if options are +// given. +func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) { + panic("unimplemented") +} + +// IsLeftToRight reports whether the principal direction of rendering for this +// paragraph is left-to-right. If this returns false, the principal direction +// of rendering is right-to-left. +func (p *Paragraph) IsLeftToRight() bool { + panic("unimplemented") +} + +// Direction returns the direction of the text of this paragraph. +// +// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. +func (p *Paragraph) Direction() Direction { + panic("unimplemented") +} + +// RunAt reports the Run at the given position of the input text. +// +// This method can be used for computing line breaks on paragraphs. +func (p *Paragraph) RunAt(pos int) Run { + panic("unimplemented") +} + +// Order computes the visual ordering of all the runs in a Paragraph. +func (p *Paragraph) Order() (Ordering, error) { + panic("unimplemented") +} + +// Line computes the visual ordering of runs for a single line starting and +// ending at the given positions in the original text. +func (p *Paragraph) Line(start, end int) (Ordering, error) { + panic("unimplemented") +} + +// An Ordering holds the computed visual order of runs of a Paragraph. Calling +// SetBytes or SetString on the originating Paragraph invalidates an Ordering. +// The methods of an Ordering should only be called by one goroutine at a time. +type Ordering struct{} + +// Direction reports the directionality of the runs. +// +// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. +func (o *Ordering) Direction() Direction { + panic("unimplemented") +} + +// NumRuns returns the number of runs. +func (o *Ordering) NumRuns() int { + panic("unimplemented") +} + +// Run returns the ith run within the ordering. +func (o *Ordering) Run(i int) Run { + panic("unimplemented") +} + +// TODO: perhaps with options. +// // Reorder creates a reader that reads the runes in visual order per character. +// // Modifiers remain after the runes they modify. +// func (l *Runs) Reorder() io.Reader { +// panic("unimplemented") +// } + +// A Run is a continuous sequence of characters of a single direction. +type Run struct { +} + +// String returns the text of the run in its original order. +func (r *Run) String() string { + panic("unimplemented") +} + +// Bytes returns the text of the run in its original order. +func (r *Run) Bytes() []byte { + panic("unimplemented") +} + +// TODO: methods for +// - Display order +// - headers and footers +// - bracket replacement. + +// Direction reports the direction of the run. +func (r *Run) Direction() Direction { + panic("unimplemented") +} + +// Pos reports the position of the Run within the text passed to SetBytes or SetString of the +// originating Paragraph value. +func (r *Run) Pos() (start, end int) { + panic("unimplemented") +} + +// AppendReverse reverses the order of characters of in, appends them to out, +// and returns the result. Modifiers will still follow the runes they modify. +// Brackets are replaced with their counterparts.
+func AppendReverse(out, in []byte) []byte { + panic("unimplemented") +} + +// ReverseString reverses the order of characters in s and returns a new string. +// Modifiers will still follow the runes they modify. Brackets are replaced with +// their counterparts. +func ReverseString(s string) string { + panic("unimplemented") +} diff --git a/vendor/golang.org/x/text/unicode/bidi/bracket.go b/vendor/golang.org/x/text/unicode/bidi/bracket.go new file mode 100644 index 00000000000..601e259203e --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/bracket.go @@ -0,0 +1,335 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import ( + "container/list" + "fmt" + "sort" +) + +// This file contains a port of the reference implementation of the +// Bidi Parentheses Algorithm: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java +// +// The implementation in this file covers definitions BD14-BD16 and rule N0 +// of UAX#9. +// +// Some preprocessing is done for each rune before data is passed to this +// algorithm: +// - opening and closing brackets are identified +// - a bracket pair type, like '(' and ')' is assigned a unique identifier that +// is identical for the opening and closing bracket. It is left to do these +// mappings. +// - The BPA algorithm requires that bracket characters that are canonical +// equivalents of each other be able to be substituted for each other. +// It is the responsibility of the caller to do this canonicalization. +// +// In implementing BD16, this implementation departs slightly from the "logical" +// algorithm defined in UAX#9. In particular, the stack referenced there +// supports operations that go beyond a "basic" stack. An equivalent +// implementation based on a linked list is used here. + +// Bidi_Paired_Bracket_Type +// BD14. An opening paired bracket is a character whose +// Bidi_Paired_Bracket_Type property value is Open. +// +// BD15. A closing paired bracket is a character whose +// Bidi_Paired_Bracket_Type property value is Close. +type bracketType byte + +const ( + bpNone bracketType = iota + bpOpen + bpClose +) + +// bracketPair holds a pair of index values for opening and closing bracket +// location of a bracket pair. +type bracketPair struct { + opener int + closer int +} + +func (b *bracketPair) String() string { + return fmt.Sprintf("(%v, %v)", b.opener, b.closer) +} + +// bracketPairs is a slice of bracketPairs with a sort.Interface implementation. +type bracketPairs []bracketPair + +func (b bracketPairs) Len() int { return len(b) } +func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener } + +// resolvePairedBrackets runs the paired bracket part of the UBA algorithm. +// +// For each rune, it takes the indexes into the original string, the class the +// bracket type (in pairTypes) and the bracket identifier (pairValues). It also +// takes the direction type for the start-of-sentence and the embedding level. +// +// The identifiers for bracket types are the rune of the canonicalized opening +// bracket for brackets (open or close) or 0 for runes that are not brackets. 
+func resolvePairedBrackets(s *isolatingRunSequence) { + p := bracketPairer{ + sos: s.sos, + openers: list.New(), + codesIsolatedRun: s.types, + indexes: s.indexes, + } + dirEmbed := L + if s.level&1 != 0 { + dirEmbed = R + } + p.locateBrackets(s.p.pairTypes, s.p.pairValues) + p.resolveBrackets(dirEmbed, s.p.initialTypes) +} + +type bracketPairer struct { + sos Class // direction corresponding to start of sequence + + // The following is a restatement of BD 16 using non-algorithmic language. + // + // A bracket pair is a pair of characters consisting of an opening + // paired bracket and a closing paired bracket such that the + // Bidi_Paired_Bracket property value of the former equals the latter, + // subject to the following constraints. + // - both characters of a pair occur in the same isolating run sequence + // - the closing character of a pair follows the opening character + // - any bracket character can belong at most to one pair, the earliest possible one + // - any bracket character not part of a pair is treated like an ordinary character + // - pairs may nest properly, but their spans may not overlap otherwise + + // Bracket characters with canonical decompositions are supposed to be + // treated as if they had been normalized, to allow normalized and non- + // normalized text to give the same result. In this implementation that step + // is pushed out to the caller. The caller has to ensure that the pairValue + // slices contain the rune of the opening bracket after normalization for + // any opening or closing bracket. + + openers *list.List // list of positions for opening brackets + + // bracket pair positions sorted by location of opening bracket + pairPositions bracketPairs + + codesIsolatedRun []Class // directional bidi codes for an isolated run + indexes []int // array of index values into the original string + +} + +// matchOpener reports whether characters at given positions form a matching +// bracket pair. +func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool { + return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]] +} + +const maxPairingDepth = 63 + +// locateBrackets locates matching bracket pairs according to BD16. +// +// This implementation uses a linked list instead of a stack, because, while +// elements are added at the front (like a push) they are not generally removed +// in atomic 'pop' operations, reducing the benefit of the stack archetype. 
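+//
+// A worked example (indexes are positions within the isolating run sequence):
+// for the text "(a[b)c]", the ')' at position 4 matches the '(' at position 0,
+// so the pair (0, 4) is recorded and the unmatched '[' at position 2 is
+// discarded along with it; the ']' at position 6 then finds no remaining
+// opener and is ignored, per BD16.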
+func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) { + // traverse the run + // do that explicitly (not in a for-each) so we can record position + for i, index := range p.indexes { + + // look at the bracket type for each character + if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON { + // continue scanning + continue + } + switch pairTypes[index] { + case bpOpen: + // check if maximum pairing depth reached + if p.openers.Len() == maxPairingDepth { + p.openers.Init() + return + } + // remember opener location, most recent first + p.openers.PushFront(i) + + case bpClose: + // see if there is a match + count := 0 + for elem := p.openers.Front(); elem != nil; elem = elem.Next() { + count++ + opener := elem.Value.(int) + if p.matchOpener(pairValues, opener, i) { + // if the opener matches, add nested pair to the ordered list + p.pairPositions = append(p.pairPositions, bracketPair{opener, i}) + // remove up to and including matched opener + for ; count > 0; count-- { + p.openers.Remove(p.openers.Front()) + } + break + } + } + sort.Sort(p.pairPositions) + // if we get here, the closing bracket matched no openers + // and gets ignored + } + } +} + +// Bracket pairs within an isolating run sequence are processed as units so +// that both the opening and the closing paired bracket in a pair resolve to +// the same direction. +// +// N0. Process bracket pairs in an isolating run sequence sequentially in +// the logical order of the text positions of the opening paired brackets +// using the logic given below. Within this scope, bidirectional types EN +// and AN are treated as R. +// +// Identify the bracket pairs in the current isolating run sequence +// according to BD16. For each bracket-pair element in the list of pairs of +// text positions: +// +// a Inspect the bidirectional types of the characters enclosed within the +// bracket pair. +// +// b If any strong type (either L or R) matching the embedding direction is +// found, set the type for both brackets in the pair to match the embedding +// direction. +// +// o [ e ] o -> o e e e o +// +// o [ o e ] -> o e o e e +// +// o [ NI e ] -> o e NI e e +// +// c Otherwise, if a strong type (opposite the embedding direction) is +// found, test for adjacent strong types as follows: 1 First, check +// backwards before the opening paired bracket until the first strong type +// (L, R, or sos) is found. If that first preceding strong type is opposite +// the embedding direction, then set the type for both brackets in the pair +// to that type. 2 Otherwise, set the type for both brackets in the pair to +// the embedding direction. +// +// o [ o ] e -> o o o o e +// +// o [ o NI ] o -> o o o NI o o +// +// e [ o ] o -> e e o e o +// +// e [ o ] e -> e e o e e +// +// e ( o [ o ] NI ) e -> e e o o o o NI e e +// +// d Otherwise, do not set the type for the current bracket pair. Note that +// if the enclosed text contains no strong types the paired brackets will +// both resolve to the same level when resolved individually using rules N1 +// and N2. +// +// e ( NI ) o -> e ( NI ) o + +// getStrongTypeN0 maps character's directional code to strong type as required +// by rule N0. +// +// TODO: have separate type for "strong" directionality. 
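+//
+// For example, within the scope of N0 the class sequence [L, EN, ON, AN]
+// maps to [L, R, ON, R]: the number types EN and AN are treated as strong R
+// here, while ON (and any other class) stays neutral.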
+func (p *bracketPairer) getStrongTypeN0(index int) Class {
+	switch p.codesIsolatedRun[index] {
+	// in the scope of N0, number types are treated as R
+	case EN, AN, AL, R:
+		return R
+	case L:
+		return L
+	default:
+		return ON
+	}
+}
+
+// classifyPairContent reports the strong types contained inside a Bracket Pair,
+// assuming the given embedding direction.
+//
+// It returns ON if no strong type is found. If a single strong type is found,
+// it returns this type. Otherwise it returns the embedding direction.
+//
+// TODO: use separate type for "strong" directionality.
+func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
+	dirOpposite := ON
+	for i := loc.opener + 1; i < loc.closer; i++ {
+		dir := p.getStrongTypeN0(i)
+		if dir == ON {
+			continue
+		}
+		if dir == dirEmbed {
+			return dir // type matching embedding direction found
+		}
+		dirOpposite = dir
+	}
+	// return ON if no strong type found, or class opposite to dirEmbed
+	return dirOpposite
+}
+
+// classBeforePair determines the strong type preceding a Bracket Pair. It
+// returns R or L if a strong type is found, otherwise sos.
+func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
+	for i := loc.opener - 1; i >= 0; i-- {
+		if dir := p.getStrongTypeN0(i); dir != ON {
+			return dir
+		}
+	}
+	// no strong types found, return sos
+	return p.sos
+}
+
+// assignBracketType implements rule N0 for a single bracket pair.
+func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
+	// rule "N0, a", inspect contents of pair
+	dirPair := p.classifyPairContent(loc, dirEmbed)
+
+	// dirPair is now L, R, or N (no strong type found)
+
+	// the following logical tests are performed out of order compared to
+	// the statement of the rules but yield the same results
+	if dirPair == ON {
+		return // case "d" - nothing to do
+	}
+
+	if dirPair != dirEmbed {
+		// case "c": strong type found, opposite - check before (c.1)
+		dirPair = p.classBeforePair(loc)
+		if dirPair == dirEmbed || dirPair == ON {
+			// no strong opposite type found before - use embedding (c.2)
+			dirPair = dirEmbed
+		}
+	}
+	// else: case "b", strong type found matching embedding,
+	// no explicit action needed, as dirPair is already set to embedding
+	// direction
+
+	// set the bracket types to the type found
+	p.setBracketsToType(loc, dirPair, initialTypes)
+}
+
+func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
+	p.codesIsolatedRun[loc.opener] = dirPair
+	p.codesIsolatedRun[loc.closer] = dirPair
+
+	for i := loc.opener + 1; i < loc.closer; i++ {
+		index := p.indexes[i]
+		if initialTypes[index] != NSM {
+			break
+		}
+		p.codesIsolatedRun[i] = dirPair
+	}
+
+	for i := loc.closer + 1; i < len(p.indexes); i++ {
+		index := p.indexes[i]
+		if initialTypes[index] != NSM {
+			break
+		}
+		p.codesIsolatedRun[i] = dirPair
+	}
+}
+
+// resolveBrackets implements rule N0 for a list of pairs.
+func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
+	for _, loc := range p.pairPositions {
+		p.assignBracketType(loc, dirEmbed, initialTypes)
+	}
+}
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
new file mode 100644
index 00000000000..d4c1399f0da
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -0,0 +1,1058 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "log" + +// This implementation is a port based on the reference implementation found at: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ +// +// described in Unicode Bidirectional Algorithm (UAX #9). +// +// Input: +// There are two levels of input to the algorithm, since clients may prefer to +// supply some information from out-of-band sources rather than relying on the +// default behavior. +// +// - Bidi class array +// - Bidi class array, with externally supplied base line direction +// +// Output: +// Output is separated into several stages: +// +// - levels array over entire paragraph +// - reordering array over entire paragraph +// - levels array over line +// - reordering array over line +// +// Note that for conformance to the Unicode Bidirectional Algorithm, +// implementations are only required to generate correct reordering and +// character directionality (odd or even levels) over a line. Generating +// identical level arrays over a line is not required. Bidi explicit format +// codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and +// positions as long as the rest of the input is properly reordered. +// +// As the algorithm is defined to operate on a single paragraph at a time, this +// implementation is written to handle single paragraphs. Thus rule P1 is +// presumed by this implementation-- the data provided to the implementation is +// assumed to be a single paragraph, and either contains no 'B' codes, or a +// single 'B' code at the end of the input. 'B' is allowed as input to +// illustrate how the algorithm assigns it a level. +// +// Also note that rules L3 and L4 depend on the rendering engine that uses the +// result of the bidi algorithm. This implementation assumes that the rendering +// engine expects combining marks in visual order (e.g. to the left of their +// base character in RTL runs) and that it adjusts the glyphs used to render +// mirrored characters that are in RTL runs so that they render appropriately. + +// level is the embedding level of a character. Even embedding levels indicate +// left-to-right order and odd levels indicate right-to-left order. The special +// level of -1 is reserved for undefined order. +type level int8 + +const implicitLevel level = -1 + +// in returns if x is equal to any of the values in set. +func (c Class) in(set ...Class) bool { + for _, s := range set { + if c == s { + return true + } + } + return false +} + +// A paragraph contains the state of a paragraph. +type paragraph struct { + initialTypes []Class + + // Arrays of properties needed for paired bracket evaluation in N0 + pairTypes []bracketType // paired Bracket types for paragraph + pairValues []rune // rune for opening bracket or pbOpen and pbClose; 0 for pbNone + + embeddingLevel level // default: = implicitLevel; + + // at the paragraph levels + resultTypes []Class + resultLevels []level + + // Index of matching PDI for isolate initiator characters. For other + // characters, the value of matchingPDI will be set to -1. For isolate + // initiators with no matching PDI, matchingPDI will be set to the length of + // the input string. + matchingPDI []int + + // Index of matching isolate initiator for PDI characters. For other + // characters, and for PDIs with no matching isolate initiator, the value of + // matchingIsolateInitiator will be set to -1. 
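+	//
+	// Illustrative example: for initialTypes [LRI, L, PDI, L], matchingPDI
+	// is [2, -1, -1, -1] and matchingIsolateInitiator is [-1, -1, 0, -1].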
+ matchingIsolateInitiator []int +} + +// newParagraph initializes a paragraph. The user needs to supply a few arrays +// corresponding to the preprocessed text input. The types correspond to the +// Unicode BiDi classes for each rune. pairTypes indicates the bracket type for +// each rune. pairValues provides a unique bracket class identifier for each +// rune (suggested is the rune of the open bracket for opening and matching +// close brackets, after normalization). The embedding levels are optional, but +// may be supplied to encode embedding levels of styled text. +// +// TODO: return an error. +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { + validateTypes(types) + validatePbTypes(pairTypes) + validatePbValues(pairValues, pairTypes) + validateParagraphEmbeddingLevel(levels) + + p := ¶graph{ + initialTypes: append([]Class(nil), types...), + embeddingLevel: levels, + + pairTypes: pairTypes, + pairValues: pairValues, + + resultTypes: append([]Class(nil), types...), + } + p.run() + return p +} + +func (p *paragraph) Len() int { return len(p.initialTypes) } + +// The algorithm. Does not include line-based processing (Rules L1, L2). +// These are applied later in the line-based phase of the algorithm. +func (p *paragraph) run() { + p.determineMatchingIsolates() + + // 1) determining the paragraph level + // Rule P1 is the requirement for entering this algorithm. + // Rules P2, P3. + // If no externally supplied paragraph embedding level, use default. + if p.embeddingLevel == implicitLevel { + p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len()) + } + + // Initialize result levels to paragraph embedding level. + p.resultLevels = make([]level, p.Len()) + setLevels(p.resultLevels, p.embeddingLevel) + + // 2) Explicit levels and directions + // Rules X1-X8. + p.determineExplicitEmbeddingLevels() + + // Rule X9. + // We do not remove the embeddings, the overrides, the PDFs, and the BNs + // from the string explicitly. But they are not copied into isolating run + // sequences when they are created, so they are removed for all + // practical purposes. + + // Rule X10. + // Run remainder of algorithm one isolating run sequence at a time + for _, seq := range p.determineIsolatingRunSequences() { + // 3) resolving weak types + // Rules W1-W7. + seq.resolveWeakTypes() + + // 4a) resolving paired brackets + // Rule N0 + resolvePairedBrackets(seq) + + // 4b) resolving neutral types + // Rules N1-N3. + seq.resolveNeutralTypes() + + // 5) resolving implicit embedding levels + // Rules I1, I2. + seq.resolveImplicitLevels() + + // Apply the computed levels and types + seq.applyLevelsAndTypes() + } + + // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and + // BNs. This is for convenience, so the resulting level array will have + // a value for every character. + p.assignLevelsToCharactersRemovedByX9() +} + +// determineMatchingIsolates determines the matching PDI for each isolate +// initiator and vice versa. +// +// Definition BD9. +// +// At the end of this function: +// +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. 
+// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. +func (p *paragraph) determineMatchingIsolates() { + p.matchingPDI = make([]int, p.Len()) + p.matchingIsolateInitiator = make([]int, p.Len()) + + for i := range p.matchingIsolateInitiator { + p.matchingIsolateInitiator[i] = -1 + } + + for i := range p.matchingPDI { + p.matchingPDI[i] = -1 + + if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) { + depthCounter := 1 + for j := i + 1; j < p.Len(); j++ { + if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) { + depthCounter++ + } else if u == PDI { + if depthCounter--; depthCounter == 0 { + p.matchingPDI[i] = j + p.matchingIsolateInitiator[j] = i + break + } + } + } + if p.matchingPDI[i] == -1 { + p.matchingPDI[i] = p.Len() + } + } + } +} + +// determineParagraphEmbeddingLevel reports the resolved paragraph direction of +// the substring limited by the given range [start, end). +// +// Determines the paragraph level based on rules P2, P3. This is also used +// in rule X5c to find if an FSI should resolve to LRI or RLI. +func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level { + var strongType Class = unknownClass + + // Rule P2. + for i := start; i < end; i++ { + if t := p.resultTypes[i]; t.in(L, AL, R) { + strongType = t + break + } else if t.in(FSI, LRI, RLI) { + i = p.matchingPDI[i] // skip over to the matching PDI + if i > end { + log.Panic("assert (i <= end)") + } + } + } + // Rule P3. + switch strongType { + case unknownClass: // none found + // default embedding level when no strong types found is 0. + return 0 + case L: + return 0 + default: // AL, R + return 1 + } +} + +const maxDepth = 125 + +// This stack will store the embedding levels and override and isolated +// statuses +type directionalStatusStack struct { + stackCounter int + embeddingLevelStack [maxDepth + 1]level + overrideStatusStack [maxDepth + 1]Class + isolateStatusStack [maxDepth + 1]bool +} + +func (s *directionalStatusStack) empty() { s.stackCounter = 0 } +func (s *directionalStatusStack) pop() { s.stackCounter-- } +func (s *directionalStatusStack) depth() int { return s.stackCounter } + +func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) { + s.embeddingLevelStack[s.stackCounter] = level + s.overrideStatusStack[s.stackCounter] = overrideStatus + s.isolateStatusStack[s.stackCounter] = isolateStatus + s.stackCounter++ +} + +func (s *directionalStatusStack) lastEmbeddingLevel() level { + return s.embeddingLevelStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class { + return s.overrideStatusStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool { + return s.isolateStatusStack[s.stackCounter-1] +} + +// Determine explicit levels using rules X1 - X8 +func (p *paragraph) determineExplicitEmbeddingLevels() { + var stack directionalStatusStack + var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int + + // Rule X1. 
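+	// (X1 seeds the stack with the paragraph embedding level, a neutral
+	// override status (ON), and a false isolate status; for a plain LTR
+	// paragraph this is the single entry {level 0, override ON, isolate
+	// false}.)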
+ stack.push(p.embeddingLevel, ON, false) + + for i, t := range p.resultTypes { + // Rules X2, X3, X4, X5, X5a, X5b, X5c + switch t { + case RLE, LRE, RLO, LRO, RLI, LRI, FSI: + isIsolate := t.in(RLI, LRI, FSI) + isRTL := t.in(RLE, RLO, RLI) + + // override if this is an FSI that resolves to RLI + if t == FSI { + isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1) + } + if isIsolate { + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + + var newLevel level + if isRTL { + // least greater odd + newLevel = (stack.lastEmbeddingLevel() + 1) | 1 + } else { + // least greater even + newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1 + } + + if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 { + if isIsolate { + validIsolateCount++ + } + // Push new embedding level, override status, and isolated + // status. + // No check for valid stack counter, since the level check + // suffices. + switch t { + case LRO: + stack.push(newLevel, L, isIsolate) + case RLO: + stack.push(newLevel, R, isIsolate) + default: + stack.push(newLevel, ON, isIsolate) + } + // Not really part of the spec + if !isIsolate { + p.resultLevels[i] = newLevel + } + } else { + // This is an invalid explicit formatting character, + // so apply the "Otherwise" part of rules X2-X5b. + if isIsolate { + overflowIsolateCount++ + } else { // !isIsolate + if overflowIsolateCount == 0 { + overflowEmbeddingCount++ + } + } + } + + // Rule X6a + case PDI: + if overflowIsolateCount > 0 { + overflowIsolateCount-- + } else if validIsolateCount == 0 { + // do nothing + } else { + overflowEmbeddingCount = 0 + for !stack.lastDirectionalIsolateStatus() { + stack.pop() + } + stack.pop() + validIsolateCount-- + } + p.resultLevels[i] = stack.lastEmbeddingLevel() + + // Rule X7 + case PDF: + // Not really part of the spec + p.resultLevels[i] = stack.lastEmbeddingLevel() + + if overflowIsolateCount > 0 { + // do nothing + } else if overflowEmbeddingCount > 0 { + overflowEmbeddingCount-- + } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 { + stack.pop() + } + + case B: // paragraph separator. + // Rule X8. + + // These values are reset for clarity, in this implementation B + // can only occur as the last code in the array. + stack.empty() + overflowIsolateCount = 0 + overflowEmbeddingCount = 0 + validIsolateCount = 0 + p.resultLevels[i] = p.embeddingLevel + + default: + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + } +} + +type isolatingRunSequence struct { + p *paragraph + + indexes []int // indexes to the original string + + types []Class // type of each character using the index + resolvedLevels []level // resolved levels after application of rules + level level + sos, eos Class +} + +func (i *isolatingRunSequence) Len() int { return len(i.indexes) } + +func maxLevel(a, b level) level { + if a > b { + return a + } + return b +} + +// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, +// either L or R, for each isolating run sequence. 
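+//
+// Illustrative example: a run at level 1 surrounded by level-0 text gets
+// sos = eos = typeForLevel(maxLevel(0, 1)) = R, since the higher of the two
+// adjacent levels determines the boundary type.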
+func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
+	length := len(indexes)
+	types := make([]Class, length)
+	for i, x := range indexes {
+		types[i] = p.resultTypes[x]
+	}
+
+	// assign level, sos and eos
+	prevChar := indexes[0] - 1
+	for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) {
+		prevChar--
+	}
+	prevLevel := p.embeddingLevel
+	if prevChar >= 0 {
+		prevLevel = p.resultLevels[prevChar]
+	}
+
+	var succLevel level
+	lastType := types[length-1]
+	if lastType.in(LRI, RLI, FSI) {
+		succLevel = p.embeddingLevel
+	} else {
+		// the first character after the end of run sequence
+		limit := indexes[length-1] + 1
+		for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ {
+		}
+		succLevel = p.embeddingLevel
+		if limit < p.Len() {
+			succLevel = p.resultLevels[limit]
+		}
+	}
+	level := p.resultLevels[indexes[0]]
+	return &isolatingRunSequence{
+		p:       p,
+		indexes: indexes,
+		types:   types,
+		level:   level,
+		sos:     typeForLevel(maxLevel(prevLevel, level)),
+		eos:     typeForLevel(maxLevel(succLevel, level)),
+	}
+}
+
+// Resolving weak types Rules W1-W7.
+//
+// Note that some weak types (EN, AN) remain after this processing is
+// complete.
+func (s *isolatingRunSequence) resolveWeakTypes() {
+
+	// on entry, only these types remain
+	s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI)
+
+	// Rule W1.
+	// Changes all NSMs. An NSM takes the type of the preceding character;
+	// an NSM following an isolate initiator or PDI takes the type ON.
+	precedingCharacterType := s.sos
+	for i, t := range s.types {
+		if t == NSM {
+			s.types[i] = precedingCharacterType
+		} else {
+			precedingCharacterType = t
+			if t.in(LRI, RLI, FSI, PDI) {
+				precedingCharacterType = ON
+			}
+		}
+	}
+
+	// Rule W2.
+	// EN does not change at the start of the run, because sos != AL.
+	for i, t := range s.types {
+		if t == EN {
+			for j := i - 1; j >= 0; j-- {
+				if t := s.types[j]; t.in(L, R, AL) {
+					if t == AL {
+						s.types[i] = AN
+					}
+					break
+				}
+			}
+		}
+	}
+
+	// Rule W3.
+	for i, t := range s.types {
+		if t == AL {
+			s.types[i] = R
+		}
+	}
+
+	// Rule W4.
+	// Since there must be values on both sides for this rule to have an
+	// effect, the scan skips the first and last value.
+	//
+	// Although the scan proceeds left to right, and changes the type
+	// values in a way that would appear to affect the computations
+	// later in the scan, there is actually no problem. A change in the
+	// current value can only affect the value to its immediate right,
+	// and only affect it if it is ES or CS. But the current value can
+	// only change if the value to its right is not ES or CS. Thus
+	// either the current value will not change, or its change will have
+	// no effect on the remainder of the analysis.
+
+	for i := 1; i < s.Len()-1; i++ {
+		t := s.types[i]
+		if t == ES || t == CS {
+			prevSepType := s.types[i-1]
+			succSepType := s.types[i+1]
+			if prevSepType == EN && succSepType == EN {
+				s.types[i] = EN
+			} else if s.types[i] == CS && prevSepType == AN && succSepType == AN {
+				s.types[i] = AN
+			}
+		}
+	}
+
+	// Rule W5.
+	for i, t := range s.types {
+		if t == ET {
+			// locate end of sequence
+			runStart := i
+			runEnd := s.findRunLimit(runStart, ET)
+
+			// check values at ends of sequence
+			t := s.sos
+			if runStart > 0 {
+				t = s.types[runStart-1]
+			}
+			if t != EN {
+				t = s.eos
+				if runEnd < len(s.types) {
+					t = s.types[runEnd]
+				}
+			}
+			if t == EN {
+				setTypes(s.types[runStart:runEnd], EN)
+			}
+			// Note: assigning to the range variable does not skip ahead in
+			// Go; re-examining positions inside the run is harmless.
+			i = runEnd
+		}
+	}
+
+	// Rule W6.
+	for i, t := range s.types {
+		if t.in(ES, ET, CS) {
+			s.types[i] = ON
+		}
+	}
+
+	// Rule W7.
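+	// (W7 changes EN to L when the nearest preceding strong type is L:
+	// e.g. the class sequence [L, ON, EN] becomes [L, ON, L], while
+	// [R, ON, EN] is left unchanged.)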
+ for i, t := range s.types { + if t == EN { + // set default if we reach start of run + prevStrongType := s.sos + for j := i - 1; j >= 0; j-- { + t = s.types[j] + if t == L || t == R { // AL's have been changed to R + prevStrongType = t + break + } + } + if prevStrongType == L { + s.types[i] = L + } + } + } +} + +// 6) resolving neutral types Rules N1-N2. +func (s *isolatingRunSequence) resolveNeutralTypes() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI) + + for i, t := range s.types { + switch t { + case WS, ON, B, S, RLI, LRI, FSI, PDI: + // find bounds of run of neutrals + runStart := i + runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI) + + // determine effective types at ends of run + var leadType, trailType Class + + // Note that the character found can only be L, R, AN, or + // EN. + if runStart == 0 { + leadType = s.sos + } else { + leadType = s.types[runStart-1] + if leadType.in(AN, EN) { + leadType = R + } + } + if runEnd == len(s.types) { + trailType = s.eos + } else { + trailType = s.types[runEnd] + if trailType.in(AN, EN) { + trailType = R + } + } + + var resolvedType Class + if leadType == trailType { + // Rule N1. + resolvedType = leadType + } else { + // Rule N2. + // Notice the embedding level of the run is used, not + // the paragraph embedding level. + resolvedType = typeForLevel(s.level) + } + + setTypes(s.types[runStart:runEnd], resolvedType) + + // skip over run of (former) neutrals + i = runEnd + } + } +} + +func setLevels(levels []level, newLevel level) { + for i := range levels { + levels[i] = newLevel + } +} + +func setTypes(types []Class, newType Class) { + for i := range types { + types[i] = newType + } +} + +// 7) resolving implicit embedding levels Rules I1, I2. +func (s *isolatingRunSequence) resolveImplicitLevels() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN) + + s.resolvedLevels = make([]level, len(s.types)) + setLevels(s.resolvedLevels, s.level) + + if (s.level & 1) == 0 { // even level + for i, t := range s.types { + // Rule I1. + if t == L { + // no change + } else if t == R { + s.resolvedLevels[i] += 1 + } else { // t == AN || t == EN + s.resolvedLevels[i] += 2 + } + } + } else { // odd level + for i, t := range s.types { + // Rule I2. + if t == R { + // no change + } else { // t == L || t == AN || t == EN + s.resolvedLevels[i] += 1 + } + } + } +} + +// Applies the levels and types resolved in rules W1-I2 to the +// resultLevels array. +func (s *isolatingRunSequence) applyLevelsAndTypes() { + for i, x := range s.indexes { + s.p.resultTypes[x] = s.types[i] + s.p.resultLevels[x] = s.resolvedLevels[i] + } +} + +// Return the limit of the run consisting only of the types in validSet +// starting at index. This checks the value at index, and will return +// index if that value is not in validSet. +func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int { +loop: + for ; index < len(s.types); index++ { + t := s.types[index] + for _, valid := range validSet { + if t == valid { + continue loop + } + } + return index // didn't find a match in validSet + } + return len(s.types) +} + +// Algorithm validation. Assert that all values in types are in the +// provided set. 
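+//
+// For example, resolveImplicitLevels calls s.assertOnly(L, R, EN, AN) and
+// will panic if any other class survived the earlier resolution stages.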
+func (s *isolatingRunSequence) assertOnly(codes ...Class) { +loop: + for i, t := range s.types { + for _, c := range codes { + if t == c { + continue loop + } + } + log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i]) + } +} + +// determineLevelRuns returns an array of level runs. Each level run is +// described as an array of indexes into the input string. +// +// Determines the level runs. Rule X9 will be applied in determining the +// runs, in the way that makes sure the characters that are supposed to be +// removed are not included in the runs. +func (p *paragraph) determineLevelRuns() [][]int { + run := []int{} + allRuns := [][]int{} + currentLevel := implicitLevel + + for i := range p.initialTypes { + if !isRemovedByX9(p.initialTypes[i]) { + if p.resultLevels[i] != currentLevel { + // we just encountered a new run; wrap up last run + if currentLevel >= 0 { // only wrap it up if there was a run + allRuns = append(allRuns, run) + run = nil + } + // Start new run + currentLevel = p.resultLevels[i] + } + run = append(run, i) + } + } + // Wrap up the final run, if any + if len(run) > 0 { + allRuns = append(allRuns, run) + } + return allRuns +} + +// Definition BD13. Determine isolating run sequences. +func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence { + levelRuns := p.determineLevelRuns() + + // Compute the run that each character belongs to + runForCharacter := make([]int, p.Len()) + for i, run := range levelRuns { + for _, index := range run { + runForCharacter[index] = i + } + } + + sequences := []*isolatingRunSequence{} + + var currentRunSequence []int + + for _, run := range levelRuns { + first := run[0] + if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 { + currentRunSequence = nil + // int run = i; + for { + // Copy this level run into currentRunSequence + currentRunSequence = append(currentRunSequence, run...) + + last := currentRunSequence[len(currentRunSequence)-1] + lastT := p.initialTypes[last] + if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() { + run = levelRuns[runForCharacter[p.matchingPDI[last]]] + } else { + break + } + } + sequences = append(sequences, p.isolatingRunSequence(currentRunSequence)) + } + } + return sequences +} + +// Assign level information to characters removed by rule X9. This is for +// ease of relating the level information to the original input data. Note +// that the levels assigned to these codes are arbitrary, they're chosen so +// as to avoid breaking level runs. +func (p *paragraph) assignLevelsToCharactersRemovedByX9() { + for i, t := range p.initialTypes { + if t.in(LRE, RLE, LRO, RLO, PDF, BN) { + p.resultTypes[i] = t + p.resultLevels[i] = -1 + } + } + // now propagate forward the levels information (could have + // propagated backward, the main thing is not to introduce a level + // break where one doesn't already exist). + + if p.resultLevels[0] == -1 { + p.resultLevels[0] = p.embeddingLevel + } + for i := 1; i < len(p.initialTypes); i++ { + if p.resultLevels[i] == -1 { + p.resultLevels[i] = p.resultLevels[i-1] + } + } + // Embedding information is for informational purposes only so need not be + // adjusted. +} + +// +// Output +// + +// getLevels computes levels array breaking lines at offsets in linebreaks. +// Rule L1. +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. 
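+// For example, a paragraph of length n rendered as a single line uses
+// linebreaks = []int{n}.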
The last value must be the length of the text. +func (p *paragraph) getLevels(linebreaks []int) []level { + // Note that since the previous processing has removed all + // P, S, and WS values from resultTypes, the values referred to + // in these rules are the initial types, before any processing + // has been applied (including processing of overrides). + // + // This example implementation has reinserted explicit format codes + // and BN, in order that the levels array correspond to the + // initial text. Their final placement is not normative. + // These codes are treated like WS in this implementation, + // so they don't interrupt sequences of WS. + + validateLineBreaks(linebreaks, p.Len()) + + result := append([]level(nil), p.resultLevels...) + + // don't worry about linebreaks since if there is a break within + // a series of WS values preceding S, the linebreak itself + // causes the reset. + for i, t := range p.initialTypes { + if t.in(B, S) { + // Rule L1, clauses one and two. + result[i] = p.embeddingLevel + + // Rule L1, clause three. + for j := i - 1; j >= 0; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + } + } + + // Rule L1, clause four. + start := 0 + for _, limit := range linebreaks { + for j := limit - 1; j >= start; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + start = limit + } + + return result +} + +// getReordering returns the reordering of lines from a visual index to a +// logical index for line breaks at the given offsets. +// +// Lines are concatenated from left to right. So for example, the fifth +// character from the left on the third line is +// +// getReordering(linebreaks)[linebreaks[1] + 4] +// +// (linebreaks[1] is the position after the last character of the second +// line, which is also the index of the first character on the third line, +// and adding four gets the fifth character from the left). +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getReordering(linebreaks []int) []int { + validateLineBreaks(linebreaks, p.Len()) + + return computeMultilineReordering(p.getLevels(linebreaks), linebreaks) +} + +// Return multiline reordering array for a given level array. Reordering +// does not occur across a line break. +func computeMultilineReordering(levels []level, linebreaks []int) []int { + result := make([]int, len(levels)) + + start := 0 + for _, limit := range linebreaks { + tempLevels := make([]level, limit-start) + copy(tempLevels, levels[start:]) + + for j, order := range computeReordering(tempLevels) { + result[start+j] = order + start + } + start = limit + } + return result +} + +// Return reordering array for a given level array. This reorders a single +// line. The reordering is a visual to logical map. For example, the +// leftmost char is string.charAt(order[0]). Rule L2. +func computeReordering(levels []level) []int { + result := make([]int, len(levels)) + // initialize order + for i := range result { + result[i] = i + } + + // locate highest level found on line. + // Note the rules say text, but no reordering across line bounds is + // performed, so this is sufficient. 
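+	// (Illustrative example: for levels [0, 1, 1, 0] the only pass runs at
+	// level 1 and reverses positions 1..2, yielding the visual-to-logical
+	// order [0, 2, 1, 3].)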
+ highestLevel := level(0) + lowestOddLevel := level(maxDepth + 2) + for _, level := range levels { + if level > highestLevel { + highestLevel = level + } + if level&1 != 0 && level < lowestOddLevel { + lowestOddLevel = level + } + } + + for level := highestLevel; level >= lowestOddLevel; level-- { + for i := 0; i < len(levels); i++ { + if levels[i] >= level { + // find range of text at or above this level + start := i + limit := i + 1 + for limit < len(levels) && levels[limit] >= level { + limit++ + } + + for j, k := start, limit-1; j < k; j, k = j+1, k-1 { + result[j], result[k] = result[k], result[j] + } + // skip to end of level run + i = limit + } + } + } + + return result +} + +// isWhitespace reports whether the type is considered a whitespace type for the +// line break rules. +func isWhitespace(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS: + return true + } + return false +} + +// isRemovedByX9 reports whether the type is one of the types removed in X9. +func isRemovedByX9(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, BN: + return true + } + return false +} + +// typeForLevel reports the strong type (L or R) corresponding to the level. +func typeForLevel(level level) Class { + if (level & 0x1) == 0 { + return L + } + return R +} + +// TODO: change validation to not panic + +func validateTypes(types []Class) { + if len(types) == 0 { + log.Panic("types is null") + } + for i, t := range types[:len(types)-1] { + if t == B { + log.Panicf("B type before end of paragraph at index: %d", i) + } + } +} + +func validateParagraphEmbeddingLevel(embeddingLevel level) { + if embeddingLevel != implicitLevel && + embeddingLevel != 0 && + embeddingLevel != 1 { + log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + } +} + +func validateLineBreaks(linebreaks []int, textLength int) { + prev := 0 + for i, next := range linebreaks { + if next <= prev { + log.Panicf("bad linebreak: %d at index: %d", next, i) + } + prev = next + } + if prev != textLength { + log.Panicf("last linebreak was %d, want %d", prev, textLength) + } +} + +func validatePbTypes(pairTypes []bracketType) { + if len(pairTypes) == 0 { + log.Panic("pairTypes is null") + } + for i, pt := range pairTypes { + switch pt { + case bpNone, bpOpen, bpClose: + default: + log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + } + } +} + +func validatePbValues(pairValues []rune, pairTypes []bracketType) { + if pairValues == nil { + log.Panic("pairValues is null") + } + if len(pairTypes) != len(pairValues) { + log.Panic("pairTypes is different length from pairValues") + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/prop.go b/vendor/golang.org/x/text/unicode/bidi/prop.go new file mode 100644 index 00000000000..7c9484e1f50 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/prop.go @@ -0,0 +1,206 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "unicode/utf8" + +// Properties provides access to BiDi properties of runes. +type Properties struct { + entry uint8 + last uint8 +} + +var trie = newBidiTrie(0) + +// TODO: using this for bidirule reduces the running time by about 5%. Consider +// if this is worth exposing or if we can find a way to speed up the Class +// method. 
+// +// // CompactClass is like Class, but maps all of the BiDi control classes +// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control. +// func (p Properties) CompactClass() Class { +// return Class(p.entry & 0x0F) +// } + +// Class returns the Bidi class for p. +func (p Properties) Class() Class { + c := Class(p.entry & 0x0F) + if c == Control { + c = controlByteToClass[p.last&0xF] + } + return c +} + +// IsBracket reports whether the rune is a bracket. +func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 } + +// IsOpeningBracket reports whether the rune is an opening bracket. +// IsBracket must return true. +func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 } + +// TODO: find a better API and expose. +func (p Properties) reverseBracket(r rune) rune { + return xorMasks[p.entry>>xorMaskShift] ^ r +} + +var controlByteToClass = [16]Class{ + 0xD: LRO, // U+202D LeftToRightOverride, + 0xE: RLO, // U+202E RightToLeftOverride, + 0xA: LRE, // U+202A LeftToRightEmbedding, + 0xB: RLE, // U+202B RightToLeftEmbedding, + 0xC: PDF, // U+202C PopDirectionalFormat, + 0x6: LRI, // U+2066 LeftToRightIsolate, + 0x7: RLI, // U+2067 RightToLeftIsolate, + 0x8: FSI, // U+2068 FirstStrongIsolate, + 0x9: PDI, // U+2069 PopDirectionalIsolate, +} + +// LookupRune returns properties for r. +func LookupRune(r rune) (p Properties, size int) { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + return Lookup(buf[:n]) +} + +// TODO: these lookup methods are based on the generated trie code. The returned +// sizes have slightly different semantics from the generated code, in that it +// always returns size==1 for an illegal UTF-8 byte (instead of the length +// of the maximum invalid subsequence). Most Transformers, like unicode/norm, +// leave invalid UTF-8 untouched, in which case it has performance benefits to +// do so (without changing the semantics). Bidi requires the semantics used here +// for the bidirule implementation to be compatible with the Go semantics. +// They ultimately should perhaps be adopted by all trie implementations, for +// convenience sake. +// This unrolled code also boosts performance of the secure/bidirule package by +// about 30%. +// So, to remove this code: +// - add option to trie generator to define return type. +// - always return 1 byte size for ill-formed UTF-8 runes. + +// Lookup returns properties for the first rune in s and the width in bytes of +// its encoding. The size will be 0 if s does not hold enough bytes to complete +// the encoding. 
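+//
+// Illustrative example: Lookup([]byte("a")) returns the ASCII entry with
+// size 1, while Lookup([]byte{0xD7}) returns size 0 because the two-byte
+// encoding is incomplete.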
+func Lookup(s []byte) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} + +// LookupString returns properties for the first rune in s and the width in +// bytes of its encoding. The size will be 0 if s does not hold enough bytes to +// complete the encoding. +func LookupString(s string) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} diff --git a/vendor/golang.org/x/text/unicode/bidi/tables.go b/vendor/golang.org/x/text/unicode/bidi/tables.go new file mode 100644 index 00000000000..a0b2b17a1ef --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables.go @@ -0,0 +1,1813 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. 
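+//
+// Illustrative example: '(' (0x28) XOR 1 yields ')' (0x29) and '[' (0x5B)
+// XOR 6 yields ']' (0x5D); reverseBracket selects the mask using the upper
+// bits of a rune's property entry.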
+var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16128 bytes (15.75 KiB). Checksum: 8122d83e461996f. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 228 blocks, 14592 entries, 14592 bytes +// The third block is the zero block. +var bidiValues = [14592]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 
0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 
0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 
0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 
0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 
0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 
0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 
0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + 
// Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 
0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 
0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 
0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 
0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 
+ 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 
0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, + 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, + 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 
0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 
0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f5e: 0x000c, 0x1f5f: 0x000c, + // 
Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, + 0x2078: 0x0004, 0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2325: 0x000c, 0x2328: 0x000c, + 0x232d: 0x000c, + // Block 0x8d, offset 0x2340 + 0x235d: 0x0001, + 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001, + 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003, + 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001, + 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001, + 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001, + 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001, + 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001, + 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 
0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b, + 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b, + 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b, + 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b, + 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b, + 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c, + 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c, + 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a, + 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x000a, + 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c, + 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 0x000c, 0x2469: 0x000c, + 0x246a: 0x000c, 0x246b: 0x000c, 
0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c, + 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a, + 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a, + 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a, + 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a, + 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a, + 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a, + 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003, + 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004, + 0x24aa: 0x0004, 0x24ab: 0x000a, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d, + 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d, + 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d, + 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d, + 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d, + 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d, + 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d, + 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b, + // Block 0x94, offset 0x2500 + 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003, + 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002, + 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002, + 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a, + 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a, + 0x253b: 0x005a, + 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a, + // Block 0x95, offset 0x2540 + 0x2540: 0x000a, + 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a, + 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a, + 0x2564: 0x000a, 0x2565: 0x000a, + // Block 0x96, offset 0x2580 + 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a, + 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a, + 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 0x000a, 0x25ae: 0x000a, + 0x25b0: 0x000b, 0x25b1: 0x000b, 
0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b, + 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a, + 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b, + // Block 0x97, offset 0x25c0 + 0x25c1: 0x000a, + // Block 0x98, offset 0x2600 + 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a, + 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a, + 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a, + 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a, + 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a, + 0x2620: 0x000a, + // Block 0x99, offset 0x2640 + 0x267d: 0x000c, + // Block 0x9a, offset 0x2680 + 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002, + 0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002, + 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002, + 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002, + 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002, + // Block 0x9b, offset 0x26c0 + 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 
0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c, + 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a, + 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 
0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0005, 0x2861: 0x0005, 0x2862: 0x0005, 0x2863: 0x0005, + 0x2864: 0x0005, 0x2865: 0x0005, 0x2866: 0x0005, 0x2867: 0x0005, 0x2868: 0x0005, 0x2869: 0x0005, + 0x286a: 0x0005, 0x286b: 0x0005, 0x286c: 0x0005, 0x286d: 0x0005, 0x286e: 0x0005, 0x286f: 0x0005, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x0005, 0x287b: 0x0005, + 0x287c: 0x0005, 0x287d: 0x0005, 0x287e: 0x0005, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2881: 0x000c, + 0x28b8: 0x000c, 0x28b9: 0x000c, 0x28ba: 0x000c, 0x28bb: 0x000c, + 0x28bc: 0x000c, 0x28bd: 0x000c, 0x28be: 0x000c, 0x28bf: 0x000c, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000c, 0x28c1: 0x000c, 0x28c2: 0x000c, 0x28c3: 0x000c, 0x28c4: 0x000c, 0x28c5: 0x000c, + 0x28c6: 0x000c, + 0x28d2: 0x000a, 0x28d3: 0x000a, 0x28d4: 0x000a, 0x28d5: 0x000a, 0x28d6: 0x000a, 0x28d7: 0x000a, + 0x28d8: 0x000a, 0x28d9: 0x000a, 0x28da: 0x000a, 0x28db: 0x000a, 0x28dc: 0x000a, 0x28dd: 0x000a, + 0x28de: 0x000a, 0x28df: 0x000a, 0x28e0: 0x000a, 0x28e1: 0x000a, 0x28e2: 0x000a, 0x28e3: 0x000a, + 0x28e4: 0x000a, 0x28e5: 0x000a, + 0x28ff: 0x000c, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000c, 0x2901: 0x000c, + 0x2933: 0x000c, 0x2934: 0x000c, 0x2935: 0x000c, + 0x2936: 0x000c, 0x2939: 0x000c, 0x293a: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, 0x2942: 0x000c, + 0x2967: 0x000c, 0x2968: 0x000c, 0x2969: 0x000c, + 0x296a: 0x000c, 0x296b: 0x000c, 0x296d: 0x000c, 0x296e: 0x000c, 0x296f: 0x000c, + 0x2970: 0x000c, 0x2971: 0x000c, 0x2972: 0x000c, 0x2973: 0x000c, 0x2974: 0x000c, + // Block 0xa6, offset 0x2980 + 0x29b3: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, + 0x29f6: 0x000c, 0x29f7: 0x000c, 0x29f8: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c, 0x29fb: 0x000c, + 0x29fc: 0x000c, 0x29fd: 0x000c, 0x29fe: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a0a: 0x000c, 0x2a0b: 0x000c, + 0x2a0c: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a6f: 0x000c, + 0x2a70: 0x000c, 0x2a71: 0x000c, 0x2a74: 0x000c, + 0x2a76: 0x000c, 0x2a77: 0x000c, + 0x2a7e: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a9f: 0x000c, 0x2aa3: 0x000c, + 0x2aa4: 0x000c, 0x2aa5: 0x000c, 0x2aa6: 0x000c, 0x2aa7: 0x000c, 0x2aa8: 0x000c, 0x2aa9: 0x000c, + 0x2aaa: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x000c, 0x2ac1: 0x000c, + 0x2afc: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, + 0x2b26: 0x000c, 0x2b27: 0x000c, 0x2b28: 0x000c, 0x2b29: 0x000c, + 0x2b2a: 0x000c, 0x2b2b: 0x000c, 0x2b2c: 0x000c, + 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b32: 0x000c, 0x2b33: 0x000c, 0x2b34: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b78: 0x000c, 0x2b79: 0x000c, 0x2b7a: 0x000c, 0x2b7b: 0x000c, + 0x2b7c: 0x000c, 0x2b7d: 0x000c, 0x2b7e: 0x000c, 0x2b7f: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b82: 0x000c, 0x2b83: 0x000c, 0x2b84: 0x000c, + 0x2b86: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bf3: 0x000c, 0x2bf4: 0x000c, 0x2bf5: 0x000c, + 0x2bf6: 0x000c, 0x2bf7: 0x000c, 0x2bf8: 0x000c, 0x2bfa: 0x000c, + 0x2bff: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c00: 0x000c, 0x2c02: 0x000c, 0x2c03: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c72: 0x000c, 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c, + 0x2c7c: 0x000c, 0x2c7d: 0x000c, 0x2c7f: 0x000c, + // Block 0xb2, 
offset 0x2c80 + 0x2c80: 0x000c, + 0x2c9c: 0x000c, 0x2c9d: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c, + 0x2cf6: 0x000c, 0x2cf7: 0x000c, 0x2cf8: 0x000c, 0x2cf9: 0x000c, 0x2cfa: 0x000c, + 0x2cfd: 0x000c, 0x2cff: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x000c, + 0x2d20: 0x000a, 0x2d21: 0x000a, 0x2d22: 0x000a, 0x2d23: 0x000a, + 0x2d24: 0x000a, 0x2d25: 0x000a, 0x2d26: 0x000a, 0x2d27: 0x000a, 0x2d28: 0x000a, 0x2d29: 0x000a, + 0x2d2a: 0x000a, 0x2d2b: 0x000a, 0x2d2c: 0x000a, + // Block 0xb5, offset 0x2d40 + 0x2d6b: 0x000c, 0x2d6d: 0x000c, + 0x2d70: 0x000c, 0x2d71: 0x000c, 0x2d72: 0x000c, 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c, + 0x2d77: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2d9d: 0x000c, + 0x2d9e: 0x000c, 0x2d9f: 0x000c, 0x2da2: 0x000c, 0x2da3: 0x000c, + 0x2da4: 0x000c, 0x2da5: 0x000c, 0x2da7: 0x000c, 0x2da8: 0x000c, 0x2da9: 0x000c, + 0x2daa: 0x000c, 0x2dab: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2dc1: 0x000c, 0x2dc2: 0x000c, 0x2dc3: 0x000c, 0x2dc4: 0x000c, 0x2dc5: 0x000c, + 0x2dc6: 0x000c, 0x2dc9: 0x000c, 0x2dca: 0x000c, + 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c, + 0x2df6: 0x000c, 0x2df7: 0x000c, 0x2df8: 0x000c, 0x2dfb: 0x000c, + 0x2dfc: 0x000c, 0x2dfd: 0x000c, 0x2dfe: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e07: 0x000c, + 0x2e11: 0x000c, + 0x2e12: 0x000c, 0x2e13: 0x000c, 0x2e14: 0x000c, 0x2e15: 0x000c, 0x2e16: 0x000c, + 0x2e19: 0x000c, 0x2e1a: 0x000c, 0x2e1b: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e4a: 0x000c, 0x2e4b: 0x000c, + 0x2e4c: 0x000c, 0x2e4d: 0x000c, 0x2e4e: 0x000c, 0x2e4f: 0x000c, 0x2e50: 0x000c, 0x2e51: 0x000c, + 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, + 0x2e58: 0x000c, 0x2e59: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb8: 0x000c, 0x2eb9: 0x000c, 0x2eba: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, 0x2ed7: 0x000c, + 0x2ed8: 0x000c, 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, 0x2edc: 0x000c, 0x2edd: 0x000c, + 0x2ede: 0x000c, 0x2edf: 0x000c, 0x2ee0: 0x000c, 0x2ee1: 0x000c, 0x2ee2: 0x000c, 0x2ee3: 0x000c, + 0x2ee4: 0x000c, 0x2ee5: 0x000c, 0x2ee6: 0x000c, 0x2ee7: 0x000c, + 0x2eea: 0x000c, 0x2eeb: 0x000c, 0x2eec: 0x000c, 0x2eed: 0x000c, 0x2eee: 0x000c, 0x2eef: 0x000c, + 0x2ef0: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef5: 0x000c, + 0x2ef6: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f31: 0x000c, 0x2f32: 0x000c, 0x2f33: 0x000c, 0x2f34: 0x000c, 0x2f35: 0x000c, + 0x2f36: 0x000c, 0x2f3a: 0x000c, + 0x2f3c: 0x000c, 0x2f3d: 0x000c, 0x2f3f: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f40: 0x000c, 0x2f41: 0x000c, 0x2f42: 0x000c, 0x2f43: 0x000c, 0x2f44: 0x000c, 0x2f45: 0x000c, + 0x2f47: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2fb0: 0x000c, 0x2fb1: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb4: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff0: 0x000c, 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, + // Block 0xc0, offset 0x3000 + 0x300f: 0x000c, 0x3010: 0x000c, 0x3011: 0x000c, + 0x3012: 0x000c, + // Block 0xc1, offset 0x3040 + 0x305d: 0x000c, + 0x305e: 0x000c, 0x3060: 0x000b, 0x3061: 0x000b, 0x3062: 0x000b, 0x3063: 0x000b, + // Block 0xc2, offset 0x3080 + 0x30a7: 0x000c, 0x30a8: 0x000c, 0x30a9: 0x000c, + 0x30b3: 0x000b, 0x30b4: 0x000b, 0x30b5: 0x000b, + 0x30b6: 0x000b, 0x30b7: 
0x000b, 0x30b8: 0x000b, 0x30b9: 0x000b, 0x30ba: 0x000b, 0x30bb: 0x000c, + 0x30bc: 0x000c, 0x30bd: 0x000c, 0x30be: 0x000c, 0x30bf: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30c0: 0x000c, 0x30c1: 0x000c, 0x30c2: 0x000c, 0x30c5: 0x000c, + 0x30c6: 0x000c, 0x30c7: 0x000c, 0x30c8: 0x000c, 0x30c9: 0x000c, 0x30ca: 0x000c, 0x30cb: 0x000c, + 0x30ea: 0x000c, 0x30eb: 0x000c, 0x30ec: 0x000c, 0x30ed: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3100: 0x000a, 0x3101: 0x000a, 0x3102: 0x000c, 0x3103: 0x000c, 0x3104: 0x000c, 0x3105: 0x000a, + // Block 0xc5, offset 0x3140 + 0x3140: 0x000a, 0x3141: 0x000a, 0x3142: 0x000a, 0x3143: 0x000a, 0x3144: 0x000a, 0x3145: 0x000a, + 0x3146: 0x000a, 0x3147: 0x000a, 0x3148: 0x000a, 0x3149: 0x000a, 0x314a: 0x000a, 0x314b: 0x000a, + 0x314c: 0x000a, 0x314d: 0x000a, 0x314e: 0x000a, 0x314f: 0x000a, 0x3150: 0x000a, 0x3151: 0x000a, + 0x3152: 0x000a, 0x3153: 0x000a, 0x3154: 0x000a, 0x3155: 0x000a, 0x3156: 0x000a, + // Block 0xc6, offset 0x3180 + 0x319b: 0x000a, + // Block 0xc7, offset 0x31c0 + 0x31d5: 0x000a, + // Block 0xc8, offset 0x3200 + 0x320f: 0x000a, + // Block 0xc9, offset 0x3240 + 0x3249: 0x000a, + // Block 0xca, offset 0x3280 + 0x3283: 0x000a, + 0x328e: 0x0002, 0x328f: 0x0002, 0x3290: 0x0002, 0x3291: 0x0002, + 0x3292: 0x0002, 0x3293: 0x0002, 0x3294: 0x0002, 0x3295: 0x0002, 0x3296: 0x0002, 0x3297: 0x0002, + 0x3298: 0x0002, 0x3299: 0x0002, 0x329a: 0x0002, 0x329b: 0x0002, 0x329c: 0x0002, 0x329d: 0x0002, + 0x329e: 0x0002, 0x329f: 0x0002, 0x32a0: 0x0002, 0x32a1: 0x0002, 0x32a2: 0x0002, 0x32a3: 0x0002, + 0x32a4: 0x0002, 0x32a5: 0x0002, 0x32a6: 0x0002, 0x32a7: 0x0002, 0x32a8: 0x0002, 0x32a9: 0x0002, + 0x32aa: 0x0002, 0x32ab: 0x0002, 0x32ac: 0x0002, 0x32ad: 0x0002, 0x32ae: 0x0002, 0x32af: 0x0002, + 0x32b0: 0x0002, 0x32b1: 0x0002, 0x32b2: 0x0002, 0x32b3: 0x0002, 0x32b4: 0x0002, 0x32b5: 0x0002, + 0x32b6: 0x0002, 0x32b7: 0x0002, 0x32b8: 0x0002, 0x32b9: 0x0002, 0x32ba: 0x0002, 0x32bb: 0x0002, + 0x32bc: 0x0002, 0x32bd: 0x0002, 0x32be: 0x0002, 0x32bf: 0x0002, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x000c, 0x32c1: 0x000c, 0x32c2: 0x000c, 0x32c3: 0x000c, 0x32c4: 0x000c, 0x32c5: 0x000c, + 0x32c6: 0x000c, 0x32c7: 0x000c, 0x32c8: 0x000c, 0x32c9: 0x000c, 0x32ca: 0x000c, 0x32cb: 0x000c, + 0x32cc: 0x000c, 0x32cd: 0x000c, 0x32ce: 0x000c, 0x32cf: 0x000c, 0x32d0: 0x000c, 0x32d1: 0x000c, + 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x000c, + 0x32d8: 0x000c, 0x32d9: 0x000c, 0x32da: 0x000c, 0x32db: 0x000c, 0x32dc: 0x000c, 0x32dd: 0x000c, + 0x32de: 0x000c, 0x32df: 0x000c, 0x32e0: 0x000c, 0x32e1: 0x000c, 0x32e2: 0x000c, 0x32e3: 0x000c, + 0x32e4: 0x000c, 0x32e5: 0x000c, 0x32e6: 0x000c, 0x32e7: 0x000c, 0x32e8: 0x000c, 0x32e9: 0x000c, + 0x32ea: 0x000c, 0x32eb: 0x000c, 0x32ec: 0x000c, 0x32ed: 0x000c, 0x32ee: 0x000c, 0x32ef: 0x000c, + 0x32f0: 0x000c, 0x32f1: 0x000c, 0x32f2: 0x000c, 0x32f3: 0x000c, 0x32f4: 0x000c, 0x32f5: 0x000c, + 0x32f6: 0x000c, 0x32fb: 0x000c, + 0x32fc: 0x000c, 0x32fd: 0x000c, 0x32fe: 0x000c, 0x32ff: 0x000c, + // Block 0xcc, offset 0x3300 + 0x3300: 0x000c, 0x3301: 0x000c, 0x3302: 0x000c, 0x3303: 0x000c, 0x3304: 0x000c, 0x3305: 0x000c, + 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x000c, + 0x330c: 0x000c, 0x330d: 0x000c, 0x330e: 0x000c, 0x330f: 0x000c, 0x3310: 0x000c, 0x3311: 0x000c, + 0x3312: 0x000c, 0x3313: 0x000c, 0x3314: 0x000c, 0x3315: 0x000c, 0x3316: 0x000c, 0x3317: 0x000c, + 0x3318: 0x000c, 0x3319: 0x000c, 0x331a: 0x000c, 0x331b: 0x000c, 0x331c: 0x000c, 0x331d: 0x000c, + 0x331e: 0x000c, 
0x331f: 0x000c, 0x3320: 0x000c, 0x3321: 0x000c, 0x3322: 0x000c, 0x3323: 0x000c, + 0x3324: 0x000c, 0x3325: 0x000c, 0x3326: 0x000c, 0x3327: 0x000c, 0x3328: 0x000c, 0x3329: 0x000c, + 0x332a: 0x000c, 0x332b: 0x000c, 0x332c: 0x000c, + 0x3335: 0x000c, + // Block 0xcd, offset 0x3340 + 0x3344: 0x000c, + 0x335b: 0x000c, 0x335c: 0x000c, 0x335d: 0x000c, + 0x335e: 0x000c, 0x335f: 0x000c, 0x3361: 0x000c, 0x3362: 0x000c, 0x3363: 0x000c, + 0x3364: 0x000c, 0x3365: 0x000c, 0x3366: 0x000c, 0x3367: 0x000c, 0x3368: 0x000c, 0x3369: 0x000c, + 0x336a: 0x000c, 0x336b: 0x000c, 0x336c: 0x000c, 0x336d: 0x000c, 0x336e: 0x000c, 0x336f: 0x000c, + // Block 0xce, offset 0x3380 + 0x3380: 0x000c, 0x3381: 0x000c, 0x3382: 0x000c, 0x3383: 0x000c, 0x3384: 0x000c, 0x3385: 0x000c, + 0x3386: 0x000c, 0x3388: 0x000c, 0x3389: 0x000c, 0x338a: 0x000c, 0x338b: 0x000c, + 0x338c: 0x000c, 0x338d: 0x000c, 0x338e: 0x000c, 0x338f: 0x000c, 0x3390: 0x000c, 0x3391: 0x000c, + 0x3392: 0x000c, 0x3393: 0x000c, 0x3394: 0x000c, 0x3395: 0x000c, 0x3396: 0x000c, 0x3397: 0x000c, + 0x3398: 0x000c, 0x339b: 0x000c, 0x339c: 0x000c, 0x339d: 0x000c, + 0x339e: 0x000c, 0x339f: 0x000c, 0x33a0: 0x000c, 0x33a1: 0x000c, 0x33a3: 0x000c, + 0x33a4: 0x000c, 0x33a6: 0x000c, 0x33a7: 0x000c, 0x33a8: 0x000c, 0x33a9: 0x000c, + 0x33aa: 0x000c, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x0001, 0x33c1: 0x0001, 0x33c2: 0x0001, 0x33c3: 0x0001, 0x33c4: 0x0001, 0x33c5: 0x0001, + 0x33c6: 0x0001, 0x33c7: 0x0001, 0x33c8: 0x0001, 0x33c9: 0x0001, 0x33ca: 0x0001, 0x33cb: 0x0001, + 0x33cc: 0x0001, 0x33cd: 0x0001, 0x33ce: 0x0001, 0x33cf: 0x0001, 0x33d0: 0x000c, 0x33d1: 0x000c, + 0x33d2: 0x000c, 0x33d3: 0x000c, 0x33d4: 0x000c, 0x33d5: 0x000c, 0x33d6: 0x000c, 0x33d7: 0x0001, + 0x33d8: 0x0001, 0x33d9: 0x0001, 0x33da: 0x0001, 0x33db: 0x0001, 0x33dc: 0x0001, 0x33dd: 0x0001, + 0x33de: 0x0001, 0x33df: 0x0001, 0x33e0: 0x0001, 0x33e1: 0x0001, 0x33e2: 0x0001, 0x33e3: 0x0001, + 0x33e4: 0x0001, 0x33e5: 0x0001, 0x33e6: 0x0001, 0x33e7: 0x0001, 0x33e8: 0x0001, 0x33e9: 0x0001, + 0x33ea: 0x0001, 0x33eb: 0x0001, 0x33ec: 0x0001, 0x33ed: 0x0001, 0x33ee: 0x0001, 0x33ef: 0x0001, + 0x33f0: 0x0001, 0x33f1: 0x0001, 0x33f2: 0x0001, 0x33f3: 0x0001, 0x33f4: 0x0001, 0x33f5: 0x0001, + 0x33f6: 0x0001, 0x33f7: 0x0001, 0x33f8: 0x0001, 0x33f9: 0x0001, 0x33fa: 0x0001, 0x33fb: 0x0001, + 0x33fc: 0x0001, 0x33fd: 0x0001, 0x33fe: 0x0001, 0x33ff: 0x0001, + // Block 0xd0, offset 0x3400 + 0x3400: 0x0001, 0x3401: 0x0001, 0x3402: 0x0001, 0x3403: 0x0001, 0x3404: 0x000c, 0x3405: 0x000c, + 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x0001, + 0x340c: 0x0001, 0x340d: 0x0001, 0x340e: 0x0001, 0x340f: 0x0001, 0x3410: 0x0001, 0x3411: 0x0001, + 0x3412: 0x0001, 0x3413: 0x0001, 0x3414: 0x0001, 0x3415: 0x0001, 0x3416: 0x0001, 0x3417: 0x0001, + 0x3418: 0x0001, 0x3419: 0x0001, 0x341a: 0x0001, 0x341b: 0x0001, 0x341c: 0x0001, 0x341d: 0x0001, + 0x341e: 0x0001, 0x341f: 0x0001, 0x3420: 0x0001, 0x3421: 0x0001, 0x3422: 0x0001, 0x3423: 0x0001, + 0x3424: 0x0001, 0x3425: 0x0001, 0x3426: 0x0001, 0x3427: 0x0001, 0x3428: 0x0001, 0x3429: 0x0001, + 0x342a: 0x0001, 0x342b: 0x0001, 0x342c: 0x0001, 0x342d: 0x0001, 0x342e: 0x0001, 0x342f: 0x0001, + 0x3430: 0x0001, 0x3431: 0x0001, 0x3432: 0x0001, 0x3433: 0x0001, 0x3434: 0x0001, 0x3435: 0x0001, + 0x3436: 0x0001, 0x3437: 0x0001, 0x3438: 0x0001, 0x3439: 0x0001, 0x343a: 0x0001, 0x343b: 0x0001, + 0x343c: 0x0001, 0x343d: 0x0001, 0x343e: 0x0001, 0x343f: 0x0001, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000d, 0x3441: 0x000d, 0x3442: 0x000d, 0x3443: 0x000d, 0x3444: 0x000d, 0x3445: 
0x000d, + 0x3446: 0x000d, 0x3447: 0x000d, 0x3448: 0x000d, 0x3449: 0x000d, 0x344a: 0x000d, 0x344b: 0x000d, + 0x344c: 0x000d, 0x344d: 0x000d, 0x344e: 0x000d, 0x344f: 0x000d, 0x3450: 0x000d, 0x3451: 0x000d, + 0x3452: 0x000d, 0x3453: 0x000d, 0x3454: 0x000d, 0x3455: 0x000d, 0x3456: 0x000d, 0x3457: 0x000d, + 0x3458: 0x000d, 0x3459: 0x000d, 0x345a: 0x000d, 0x345b: 0x000d, 0x345c: 0x000d, 0x345d: 0x000d, + 0x345e: 0x000d, 0x345f: 0x000d, 0x3460: 0x000d, 0x3461: 0x000d, 0x3462: 0x000d, 0x3463: 0x000d, + 0x3464: 0x000d, 0x3465: 0x000d, 0x3466: 0x000d, 0x3467: 0x000d, 0x3468: 0x000d, 0x3469: 0x000d, + 0x346a: 0x000d, 0x346b: 0x000d, 0x346c: 0x000d, 0x346d: 0x000d, 0x346e: 0x000d, 0x346f: 0x000d, + 0x3470: 0x000a, 0x3471: 0x000a, 0x3472: 0x000d, 0x3473: 0x000d, 0x3474: 0x000d, 0x3475: 0x000d, + 0x3476: 0x000d, 0x3477: 0x000d, 0x3478: 0x000d, 0x3479: 0x000d, 0x347a: 0x000d, 0x347b: 0x000d, + 0x347c: 0x000d, 0x347d: 0x000d, 0x347e: 0x000d, 0x347f: 0x000d, + // Block 0xd2, offset 0x3480 + 0x3480: 0x000a, 0x3481: 0x000a, 0x3482: 0x000a, 0x3483: 0x000a, 0x3484: 0x000a, 0x3485: 0x000a, + 0x3486: 0x000a, 0x3487: 0x000a, 0x3488: 0x000a, 0x3489: 0x000a, 0x348a: 0x000a, 0x348b: 0x000a, + 0x348c: 0x000a, 0x348d: 0x000a, 0x348e: 0x000a, 0x348f: 0x000a, 0x3490: 0x000a, 0x3491: 0x000a, + 0x3492: 0x000a, 0x3493: 0x000a, 0x3494: 0x000a, 0x3495: 0x000a, 0x3496: 0x000a, 0x3497: 0x000a, + 0x3498: 0x000a, 0x3499: 0x000a, 0x349a: 0x000a, 0x349b: 0x000a, 0x349c: 0x000a, 0x349d: 0x000a, + 0x349e: 0x000a, 0x349f: 0x000a, 0x34a0: 0x000a, 0x34a1: 0x000a, 0x34a2: 0x000a, 0x34a3: 0x000a, + 0x34a4: 0x000a, 0x34a5: 0x000a, 0x34a6: 0x000a, 0x34a7: 0x000a, 0x34a8: 0x000a, 0x34a9: 0x000a, + 0x34aa: 0x000a, 0x34ab: 0x000a, + 0x34b0: 0x000a, 0x34b1: 0x000a, 0x34b2: 0x000a, 0x34b3: 0x000a, 0x34b4: 0x000a, 0x34b5: 0x000a, + 0x34b6: 0x000a, 0x34b7: 0x000a, 0x34b8: 0x000a, 0x34b9: 0x000a, 0x34ba: 0x000a, 0x34bb: 0x000a, + 0x34bc: 0x000a, 0x34bd: 0x000a, 0x34be: 0x000a, 0x34bf: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, 0x34d3: 0x000a, + 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, + 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, + 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, 0x34ed: 0x000a, 0x34ee: 0x000a, + 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, + 0x34f6: 0x000a, 0x34f7: 0x000a, 0x34f8: 0x000a, 0x34f9: 0x000a, 0x34fa: 0x000a, 0x34fb: 0x000a, + 0x34fc: 0x000a, 0x34fd: 0x000a, 0x34fe: 0x000a, 0x34ff: 0x000a, + // Block 0xd4, offset 0x3500 + 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, + 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, + 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3511: 0x000a, + 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, 0x3515: 0x000a, 0x3516: 0x000a, 0x3517: 0x000a, + 0x3518: 0x000a, 0x3519: 0x000a, 0x351a: 0x000a, 0x351b: 0x000a, 0x351c: 0x000a, 0x351d: 0x000a, + 0x351e: 0x000a, 0x351f: 0x000a, 0x3520: 0x000a, 0x3521: 0x000a, 0x3522: 0x000a, 0x3523: 0x000a, + 0x3524: 0x000a, 0x3525: 0x000a, 0x3526: 0x000a, 0x3527: 0x000a, 0x3528: 0x000a, 0x3529: 0x000a, + 0x352a: 0x000a, 0x352b: 0x000a, 0x352c: 0x000a, 
0x352d: 0x000a, 0x352e: 0x000a, 0x352f: 0x000a, + 0x3530: 0x000a, 0x3531: 0x000a, 0x3532: 0x000a, 0x3533: 0x000a, 0x3534: 0x000a, 0x3535: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0002, 0x3541: 0x0002, 0x3542: 0x0002, 0x3543: 0x0002, 0x3544: 0x0002, 0x3545: 0x0002, + 0x3546: 0x0002, 0x3547: 0x0002, 0x3548: 0x0002, 0x3549: 0x0002, 0x354a: 0x0002, 0x354b: 0x000a, + 0x354c: 0x000a, + // Block 0xd6, offset 0x3580 + 0x35aa: 0x000a, 0x35ab: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a, + 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a, + 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, + 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a, + 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, + 0x3630: 0x000a, 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3650: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, + 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, + 0x3690: 0x000a, 0x3691: 0x000a, + 0x3692: 0x000a, 0x3693: 0x000a, 0x3694: 0x000a, 0x3695: 0x000a, 0x3696: 0x000a, 0x3697: 0x000a, + 0x3698: 0x000a, 0x3699: 0x000a, 0x369a: 0x000a, 0x369b: 0x000a, 0x369c: 0x000a, 0x369d: 0x000a, + 0x369e: 0x000a, 0x369f: 0x000a, 0x36a0: 0x000a, 0x36a1: 0x000a, 0x36a2: 0x000a, 0x36a3: 0x000a, + 0x36a4: 0x000a, 0x36a5: 0x000a, 0x36a6: 0x000a, 0x36a7: 0x000a, 0x36a8: 0x000a, 0x36a9: 0x000a, + 0x36aa: 0x000a, 0x36ab: 0x000a, 0x36ac: 0x000a, 0x36ad: 0x000a, 0x36ae: 0x000a, 0x36af: 0x000a, + 0x36b0: 0x000a, 0x36b1: 0x000a, 0x36b2: 0x000a, 0x36b3: 0x000a, 0x36b4: 0x000a, 0x36b5: 0x000a, + 0x36b6: 0x000a, 0x36b7: 0x000a, 0x36b8: 0x000a, 0x36b9: 0x000a, 0x36ba: 0x000a, 0x36bb: 0x000a, + 0x36bc: 0x000a, 0x36bd: 0x000a, 0x36be: 0x000a, 0x36bf: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36c0: 0x000a, 0x36c1: 0x000a, 0x36c2: 0x000a, 0x36c3: 0x000a, 0x36c4: 0x000a, 0x36c5: 0x000a, + 0x36c6: 0x000a, 0x36c7: 0x000a, + 0x36d0: 0x000a, 0x36d1: 0x000a, + 0x36d2: 0x000a, 0x36d3: 0x000a, 0x36d4: 0x000a, 0x36d5: 0x000a, 0x36d6: 0x000a, 0x36d7: 0x000a, + 0x36d8: 0x000a, 0x36d9: 0x000a, + 0x36e0: 0x000a, 0x36e1: 0x000a, 0x36e2: 0x000a, 0x36e3: 0x000a, + 0x36e4: 0x000a, 0x36e5: 0x000a, 0x36e6: 0x000a, 0x36e7: 0x000a, 0x36e8: 0x000a, 0x36e9: 0x000a, + 0x36ea: 0x000a, 0x36eb: 0x000a, 0x36ec: 0x000a, 0x36ed: 0x000a, 0x36ee: 0x000a, 0x36ef: 0x000a, + 0x36f0: 0x000a, 0x36f1: 0x000a, 0x36f2: 0x000a, 0x36f3: 0x000a, 0x36f4: 0x000a, 0x36f5: 0x000a, + 0x36f6: 0x000a, 0x36f7: 0x000a, 0x36f8: 0x000a, 0x36f9: 0x000a, 0x36fa: 0x000a, 0x36fb: 0x000a, + 0x36fc: 0x000a, 0x36fd: 0x000a, 0x36fe: 0x000a, 
0x36ff: 0x000a, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000a, 0x3701: 0x000a, 0x3702: 0x000a, 0x3703: 0x000a, 0x3704: 0x000a, 0x3705: 0x000a, + 0x3706: 0x000a, 0x3707: 0x000a, + 0x3710: 0x000a, 0x3711: 0x000a, + 0x3712: 0x000a, 0x3713: 0x000a, 0x3714: 0x000a, 0x3715: 0x000a, 0x3716: 0x000a, 0x3717: 0x000a, + 0x3718: 0x000a, 0x3719: 0x000a, 0x371a: 0x000a, 0x371b: 0x000a, 0x371c: 0x000a, 0x371d: 0x000a, + 0x371e: 0x000a, 0x371f: 0x000a, 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, 0x3726: 0x000a, 0x3727: 0x000a, 0x3728: 0x000a, 0x3729: 0x000a, + 0x372a: 0x000a, 0x372b: 0x000a, 0x372c: 0x000a, 0x372d: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a, 0x3755: 0x000a, 0x3756: 0x000a, 0x3757: 0x000a, + 0x3758: 0x000a, 0x3759: 0x000a, 0x375a: 0x000a, 0x375b: 0x000a, 0x375c: 0x000a, 0x375d: 0x000a, + 0x375e: 0x000a, 0x375f: 0x000a, 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, 0x376d: 0x000a, 0x376e: 0x000a, 0x376f: 0x000a, + 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, 0x377a: 0x000a, 0x377b: 0x000a, + 0x377c: 0x000a, 0x377d: 0x000a, 0x377e: 0x000a, + // Block 0xde, offset 0x3780 + 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, 0x3799: 0x000a, 0x379a: 0x000a, 0x379b: 0x000a, 0x379c: 0x000a, 0x379d: 0x000a, + 0x379e: 0x000a, 0x379f: 0x000a, 0x37a0: 0x000a, 0x37a1: 0x000a, 0x37a2: 0x000a, 0x37a3: 0x000a, + 0x37a4: 0x000a, 0x37a5: 0x000a, 0x37a6: 0x000a, 0x37a7: 0x000a, 0x37a8: 0x000a, 0x37a9: 0x000a, + 0x37aa: 0x000a, 0x37ab: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a, + 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a, + 0x37cc: 0x000a, 0x37cd: 0x000a, 0x37ce: 0x000a, 0x37cf: 0x000a, 0x37d0: 0x000a, 0x37d1: 0x000a, + 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000a, + 0x3810: 0x000a, 0x3811: 0x000a, + 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a, + 0x3818: 0x000a, 0x3819: 0x000a, 0x381a: 0x000a, 0x381b: 0x000a, 0x381c: 0x000a, 0x381d: 0x000a, + 0x381e: 0x000a, 0x381f: 0x000a, 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a, + 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, + // Block 0xe1, offset 0x3840 + 0x387e: 0x000b, 0x387f: 0x000b, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000b, 0x3881: 0x000b, 0x3882: 0x000b, 0x3883: 0x000b, 0x3884: 0x000b, 0x3885: 0x000b, + 0x3886: 0x000b, 0x3887: 0x000b, 0x3888: 0x000b, 0x3889: 0x000b, 0x388a: 0x000b, 0x388b: 0x000b, + 0x388c: 0x000b, 0x388d: 0x000b, 0x388e: 
0x000b, 0x388f: 0x000b, 0x3890: 0x000b, 0x3891: 0x000b, + 0x3892: 0x000b, 0x3893: 0x000b, 0x3894: 0x000b, 0x3895: 0x000b, 0x3896: 0x000b, 0x3897: 0x000b, + 0x3898: 0x000b, 0x3899: 0x000b, 0x389a: 0x000b, 0x389b: 0x000b, 0x389c: 0x000b, 0x389d: 0x000b, + 0x389e: 0x000b, 0x389f: 0x000b, 0x38a0: 0x000b, 0x38a1: 0x000b, 0x38a2: 0x000b, 0x38a3: 0x000b, + 0x38a4: 0x000b, 0x38a5: 0x000b, 0x38a6: 0x000b, 0x38a7: 0x000b, 0x38a8: 0x000b, 0x38a9: 0x000b, + 0x38aa: 0x000b, 0x38ab: 0x000b, 0x38ac: 0x000b, 0x38ad: 0x000b, 0x38ae: 0x000b, 0x38af: 0x000b, + 0x38b0: 0x000b, 0x38b1: 0x000b, 0x38b2: 0x000b, 0x38b3: 0x000b, 0x38b4: 0x000b, 0x38b5: 0x000b, + 0x38b6: 0x000b, 0x38b7: 0x000b, 0x38b8: 0x000b, 0x38b9: 0x000b, 0x38ba: 0x000b, 0x38bb: 0x000b, + 0x38bc: 0x000b, 0x38bd: 0x000b, 0x38be: 0x000b, 0x38bf: 0x000b, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000c, 0x38c1: 0x000c, 0x38c2: 0x000c, 0x38c3: 0x000c, 0x38c4: 0x000c, 0x38c5: 0x000c, + 0x38c6: 0x000c, 0x38c7: 0x000c, 0x38c8: 0x000c, 0x38c9: 0x000c, 0x38ca: 0x000c, 0x38cb: 0x000c, + 0x38cc: 0x000c, 0x38cd: 0x000c, 0x38ce: 0x000c, 0x38cf: 0x000c, 0x38d0: 0x000c, 0x38d1: 0x000c, + 0x38d2: 0x000c, 0x38d3: 0x000c, 0x38d4: 0x000c, 0x38d5: 0x000c, 0x38d6: 0x000c, 0x38d7: 0x000c, + 0x38d8: 0x000c, 0x38d9: 0x000c, 0x38da: 0x000c, 0x38db: 0x000c, 0x38dc: 0x000c, 0x38dd: 0x000c, + 0x38de: 0x000c, 0x38df: 0x000c, 0x38e0: 0x000c, 0x38e1: 0x000c, 0x38e2: 0x000c, 0x38e3: 0x000c, + 0x38e4: 0x000c, 0x38e5: 0x000c, 0x38e6: 0x000c, 0x38e7: 0x000c, 0x38e8: 0x000c, 0x38e9: 0x000c, + 0x38ea: 0x000c, 0x38eb: 0x000c, 0x38ec: 0x000c, 0x38ed: 0x000c, 0x38ee: 0x000c, 0x38ef: 0x000c, + 0x38f0: 0x000b, 0x38f1: 0x000b, 0x38f2: 0x000b, 0x38f3: 0x000b, 0x38f4: 0x000b, 0x38f5: 0x000b, + 0x38f6: 0x000b, 0x38f7: 0x000b, 0x38f8: 0x000b, 0x38f9: 0x000b, 0x38fa: 0x000b, 0x38fb: 0x000b, + 0x38fc: 0x000b, 0x38fd: 0x000b, 0x38fe: 0x000b, 0x38ff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a, + // Block 0xa, offset 0x280 + 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e, + 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97, + 0x2cb: 0x98, 0x2cd: 0x99, + 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a, + 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a, + 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9a, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a, + 0x2f8: 0x9a, 0x2f9: 0x9f, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0x9a, 0x2fd: 0x9a, 0x2fe: 0x9a, 0x2ff: 0x9a, + // Block 0xc, offset 0x300 + 0x300: 0xa0, 0x301: 0xa1, 0x302: 0xa2, 0x304: 0xa3, 0x305: 0xa4, 
0x306: 0xa5, 0x307: 0xa6, + 0x308: 0xa7, 0x30b: 0xa8, 0x30c: 0xa9, 0x30d: 0xaa, + 0x310: 0xab, 0x311: 0xac, 0x312: 0xad, 0x313: 0xae, 0x316: 0xaf, 0x317: 0xb0, + 0x318: 0xb1, 0x319: 0xb2, 0x31a: 0xb3, 0x31c: 0xb4, + 0x328: 0xb5, 0x329: 0xb6, 0x32a: 0xb7, + 0x330: 0xb8, 0x332: 0xb9, 0x334: 0xba, 0x335: 0xbb, + // Block 0xd, offset 0x340 + 0x36b: 0xbc, 0x36c: 0xbd, + 0x37e: 0xbe, + // Block 0xe, offset 0x380 + 0x3b2: 0xbf, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc0, 0x3c6: 0xc1, + 0x3c8: 0x54, 0x3c9: 0xc2, 0x3cc: 0x54, 0x3cd: 0xc3, + 0x3db: 0xc4, 0x3dc: 0xc5, 0x3dd: 0xc6, 0x3de: 0xc7, 0x3df: 0xc8, + 0x3e8: 0xc9, 0x3e9: 0xca, 0x3ea: 0xcb, + // Block 0x10, offset 0x400 + 0x400: 0xcc, + 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xcd, 0x424: 0x9a, 0x425: 0xce, 0x426: 0x9a, 0x427: 0x9a, + 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a, + 0x430: 0x9a, 0x431: 0x9a, 0x432: 0x9a, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcf, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a, + // Block 0x11, offset 0x440 + 0x440: 0xd0, 0x441: 0x54, 0x442: 0xd1, 0x443: 0xd2, 0x444: 0xd3, 0x445: 0xd4, + 0x449: 0xd5, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd6, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xd7, + 0x460: 0xd8, 0x461: 0xd9, 0x462: 0xda, 0x464: 0xdb, 0x465: 0xdc, 0x466: 0xdd, 0x467: 0xde, + 0x47f: 0xdf, + // Block 0x12, offset 0x480 + 0x4bf: 0xdf, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xe0, 0x541: 0xe0, 0x542: 0xe0, 0x543: 0xe0, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe1, + 0x548: 0xe0, 0x549: 0xe0, 0x54a: 0xe0, 0x54b: 0xe0, 0x54c: 0xe0, 0x54d: 0xe0, 0x54e: 0xe0, 0x54f: 0xe0, + 0x550: 0xe0, 0x551: 0xe0, 0x552: 0xe0, 0x553: 0xe0, 0x554: 0xe0, 0x555: 0xe0, 0x556: 0xe0, 0x557: 0xe0, + 0x558: 0xe0, 0x559: 0xe0, 0x55a: 0xe0, 0x55b: 0xe0, 0x55c: 0xe0, 0x55d: 0xe0, 0x55e: 0xe0, 0x55f: 0xe0, + 0x560: 0xe0, 0x561: 0xe0, 0x562: 0xe0, 0x563: 0xe0, 0x564: 0xe0, 0x565: 0xe0, 0x566: 0xe0, 0x567: 0xe0, + 0x568: 0xe0, 0x569: 0xe0, 0x56a: 0xe0, 0x56b: 0xe0, 0x56c: 0xe0, 0x56d: 0xe0, 0x56e: 0xe0, 0x56f: 0xe0, + 0x570: 0xe0, 0x571: 0xe0, 0x572: 0xe0, 0x573: 0xe0, 0x574: 0xe0, 0x575: 0xe0, 0x576: 0xe0, 0x577: 0xe0, + 0x578: 0xe0, 0x579: 0xe0, 0x57a: 0xe0, 0x57b: 0xe0, 0x57c: 0xe0, 0x57d: 0xe0, 0x57e: 0xe0, 0x57f: 0xe0, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16184 bytes (15KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go new file mode 100644 index 00000000000..4c459c4b72e --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -0,0 +1,60 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package bidi + +// Class is the Unicode BiDi class. Each rune has a single class. 
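+// An editor's note (an assumption, not from the upstream comment): only the
+// classes below numClass fit in the 4-bit class field of the trie entries
+// defined further down; the explicit formatting characters are resolved
+// through the controlToClass map instead, e.g. controlToClass[0x202E] == RLO.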
+type Class uint
+
+const (
+	L       Class = iota // LeftToRight
+	R                    // RightToLeft
+	EN                   // EuropeanNumber
+	ES                   // EuropeanSeparator
+	ET                   // EuropeanTerminator
+	AN                   // ArabicNumber
+	CS                   // CommonSeparator
+	B                    // ParagraphSeparator
+	S                    // SegmentSeparator
+	WS                   // WhiteSpace
+	ON                   // OtherNeutral
+	BN                   // BoundaryNeutral
+	NSM                  // NonspacingMark
+	AL                   // ArabicLetter
+	Control              // Control LRO - PDI
+
+	numClass
+
+	LRO // LeftToRightOverride
+	RLO // RightToLeftOverride
+	LRE // LeftToRightEmbedding
+	RLE // RightToLeftEmbedding
+	PDF // PopDirectionalFormat
+	LRI // LeftToRightIsolate
+	RLI // RightToLeftIsolate
+	FSI // FirstStrongIsolate
+	PDI // PopDirectionalIsolate
+
+	unknownClass = ^Class(0)
+)
+
+var controlToClass = map[rune]Class{
+	0x202D: LRO, // LeftToRightOverride,
+	0x202E: RLO, // RightToLeftOverride,
+	0x202A: LRE, // LeftToRightEmbedding,
+	0x202B: RLE, // RightToLeftEmbedding,
+	0x202C: PDF, // PopDirectionalFormat,
+	0x2066: LRI, // LeftToRightIsolate,
+	0x2067: RLI, // RightToLeftIsolate,
+	0x2068: FSI, // FirstStrongIsolate,
+	0x2069: PDI, // PopDirectionalIsolate,
+}
+
+// A trie entry has the following bits:
+//   7..5  XOR mask for brackets
+//   4     1: Bracket open, 0: Bracket close
+//   3..0  Class type
+
+const (
+	openMask     = 0x10
+	xorMaskShift = 5
+)
diff --git a/vendor/golang.org/x/text/unicode/norm/composition.go b/vendor/golang.org/x/text/unicode/norm/composition.go
new file mode 100644
index 00000000000..bab4c5de02f
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/composition.go
@@ -0,0 +1,508 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package norm
+
+import "unicode/utf8"
+
+const (
+	maxNonStarters = 30
+	// The maximum number of characters needed for a buffer is
+	// maxNonStarters + 1 for the starter + 1 for the CGJ
+	maxBufferSize    = maxNonStarters + 2
+	maxNFCExpansion  = 3  // NFC(0x1D160)
+	maxNFKCExpansion = 18 // NFKC(0xFDFA)
+
+	maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
+)
+
+// ssState is used for reporting the segment state after inserting a rune.
+// It is returned by streamSafe.next.
+type ssState int
+
+const (
+	// Indicates a rune was successfully added to the segment.
+	ssSuccess ssState = iota
+	// Indicates a rune starts a new segment and should not be added.
+	ssStarter
+	// Indicates a rune caused a segment overflow and a CGJ should be inserted.
+	ssOverflow
+)
+
+// streamSafe implements the policy of when a CGJ should be inserted.
+type streamSafe uint8
+
+// first inserts the first rune of a segment. It is a faster version of next if
+// it is known p represents the first rune in a segment.
+func (ss *streamSafe) first(p Properties) {
+	*ss = streamSafe(p.nTrailingNonStarters())
+}
+
+// next returns a ssState value to indicate whether a rune represented by p
+// can be inserted.
+func (ss *streamSafe) next(p Properties) ssState {
+	if *ss > maxNonStarters {
+		panic("streamSafe was not reset")
+	}
+	n := p.nLeadingNonStarters()
+	if *ss += streamSafe(n); *ss > maxNonStarters {
+		*ss = 0
+		return ssOverflow
+	}
+	// The Stream-Safe Text Processing prescribes that the counting can stop
+	// as soon as a starter is encountered. However, there are some starters,
+	// like Jamo V and T, that can combine with other runes, leaving their
+	// successive non-starters appended to the previous, possibly causing an
+	// overflow. We will therefore consider any rune with a non-zero nLead to
Note that it always hold that if nLead > 0 then + // nLead == nTrail. + if n == 0 { + *ss = streamSafe(p.nTrailingNonStarters()) + return ssStarter + } + return ssSuccess +} + +// backwards is used for checking for overflow and segment starts +// when traversing a string backwards. Users do not need to call first +// for the first rune. The state of the streamSafe retains the count of +// the non-starters loaded. +func (ss *streamSafe) backwards(p Properties) ssState { + if *ss > maxNonStarters { + panic("streamSafe was not reset") + } + c := *ss + streamSafe(p.nTrailingNonStarters()) + if c > maxNonStarters { + return ssOverflow + } + *ss = c + if p.nLeadingNonStarters() == 0 { + return ssStarter + } + return ssSuccess +} + +func (ss streamSafe) isMax() bool { + return ss == maxNonStarters +} + +// GraphemeJoiner is inserted after maxNonStarters non-starter runes. +const GraphemeJoiner = "\u034F" + +// reorderBuffer is used to normalize a single segment. Characters inserted with +// insert are decomposed and reordered based on CCC. The compose method can +// be used to recombine characters. Note that the byte buffer does not hold +// the UTF-8 characters in order. Only the rune array is maintained in sorted +// order. flush writes the resulting segment to a byte array. +type reorderBuffer struct { + rune [maxBufferSize]Properties // Per character info. + byte [maxByteBufferSize]byte // UTF-8 buffer. Referenced by runeInfo.pos. + nbyte uint8 // Number or bytes. + ss streamSafe // For limiting length of non-starter sequence. + nrune int // Number of runeInfos. + f formInfo + + src input + nsrc int + tmpBytes input + + out []byte + flushF func(*reorderBuffer) bool +} + +func (rb *reorderBuffer) init(f Form, src []byte) { + rb.f = *formTable[f] + rb.src.setBytes(src) + rb.nsrc = len(src) + rb.ss = 0 +} + +func (rb *reorderBuffer) initString(f Form, src string) { + rb.f = *formTable[f] + rb.src.setString(src) + rb.nsrc = len(src) + rb.ss = 0 +} + +func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) { + rb.out = out + rb.flushF = f +} + +// reset discards all characters from the buffer. +func (rb *reorderBuffer) reset() { + rb.nrune = 0 + rb.nbyte = 0 +} + +func (rb *reorderBuffer) doFlush() bool { + if rb.f.composing { + rb.compose() + } + res := rb.flushF(rb) + rb.reset() + return res +} + +// appendFlush appends the normalized segment to rb.out. +func appendFlush(rb *reorderBuffer) bool { + for i := 0; i < rb.nrune; i++ { + start := rb.rune[i].pos + end := start + rb.rune[i].size + rb.out = append(rb.out, rb.byte[start:end]...) + } + return true +} + +// flush appends the normalized segment to out and resets rb. +func (rb *reorderBuffer) flush(out []byte) []byte { + for i := 0; i < rb.nrune; i++ { + start := rb.rune[i].pos + end := start + rb.rune[i].size + out = append(out, rb.byte[start:end]...) + } + rb.reset() + return out +} + +// flushCopy copies the normalized segment to buf and resets rb. +// It returns the number of bytes written to buf. +func (rb *reorderBuffer) flushCopy(buf []byte) int { + p := 0 + for i := 0; i < rb.nrune; i++ { + runep := rb.rune[i] + p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size]) + } + rb.reset() + return p +} + +// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class. +// It returns false if the buffer is not large enough to hold the rune. +// It is used internally by insert and insertString only. 
+
+// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining
+// Class. The caller must ensure the buffer is large enough to hold the rune.
+// It is used internally by insert and insertString only.
+func (rb *reorderBuffer) insertOrdered(info Properties) {
+	n := rb.nrune
+	b := rb.rune[:]
+	cc := info.ccc
+	if cc > 0 {
+		// Find insertion position + move elements to make room.
+		for ; n > 0; n-- {
+			if b[n-1].ccc <= cc {
+				break
+			}
+			b[n] = b[n-1]
+		}
+	}
+	rb.nrune += 1
+	pos := uint8(rb.nbyte)
+	rb.nbyte += utf8.UTFMax
+	info.pos = pos
+	b[n] = info
+}
+
+// insertErr is an error code returned by insert. Using this type instead
+// of error improves performance up to 20% for many of the benchmarks.
+type insertErr int
+
+const (
+	iSuccess insertErr = -iota
+	iShortDst
+	iShortSrc
+)
+
+// insertFlush inserts the given rune in the buffer ordered by CCC.
+// If a decomposition with multiple segments is encountered, the leading
+// ones are flushed.
+// It returns a non-zero error code if the rune was not inserted.
+func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
+	if rune := src.hangul(i); rune != 0 {
+		rb.decomposeHangul(rune)
+		return iSuccess
+	}
+	if info.hasDecomposition() {
+		return rb.insertDecomposed(info.Decomposition())
+	}
+	rb.insertSingle(src, i, info)
+	return iSuccess
+}
+
+// insertUnsafe inserts the given rune in the buffer ordered by CCC.
+// It is assumed there is sufficient space to hold the runes. It is the
+// responsibility of the caller to ensure this. This can be done by checking
+// the state returned by the streamSafe type.
+func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
+	if rune := src.hangul(i); rune != 0 {
+		rb.decomposeHangul(rune)
+	}
+	if info.hasDecomposition() {
+		// TODO: inline.
+		rb.insertDecomposed(info.Decomposition())
+	} else {
+		rb.insertSingle(src, i, info)
+	}
+}
+
+// insertDecomposed inserts an entry into the reorderBuffer for each rune
+// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
+// It flushes the buffer on each new segment start.
+func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
+	rb.tmpBytes.setBytes(dcomp)
+	// As the streamSafe accounting already handles the counting for modifiers,
+	// we don't have to call next. However, we do need to keep the accounting
+	// intact when flushing the buffer.
+	for i := 0; i < len(dcomp); {
+		info := rb.f.info(rb.tmpBytes, i)
+		if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
+			return iShortDst
+		}
+		i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
+		rb.insertOrdered(info)
+	}
+	return iSuccess
+}
+
+// insertSingle inserts an entry in the reorderBuffer for the rune at
+// position i. info is the runeInfo for the rune at position i.
+func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
+	src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
+	rb.insertOrdered(info)
+}
+
+// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
+func (rb *reorderBuffer) insertCGJ() {
+	rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
+}
+
+// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
+func (rb *reorderBuffer) appendRune(r rune) {
+	bn := rb.nbyte
+	sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
+	rb.nbyte += utf8.UTFMax
+	rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)}
+	rb.nrune++
+}
+
+// assignRune sets a rune at position pos. It is used for Hangul and
+// recomposition.
+func (rb *reorderBuffer) assignRune(pos int, r rune) { + bn := rb.rune[pos].pos + sz := utf8.EncodeRune(rb.byte[bn:], rune(r)) + rb.rune[pos] = Properties{pos: bn, size: uint8(sz)} +} + +// runeAt returns the rune at position n. It is used for Hangul and recomposition. +func (rb *reorderBuffer) runeAt(n int) rune { + inf := rb.rune[n] + r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size]) + return r +} + +// bytesAt returns the UTF-8 encoding of the rune at position n. +// It is used for Hangul and recomposition. +func (rb *reorderBuffer) bytesAt(n int) []byte { + inf := rb.rune[n] + return rb.byte[inf.pos : int(inf.pos)+int(inf.size)] +} + +// For Hangul we combine algorithmically, instead of using tables. +const ( + hangulBase = 0xAC00 // UTF-8(hangulBase) -> EA B0 80 + hangulBase0 = 0xEA + hangulBase1 = 0xB0 + hangulBase2 = 0x80 + + hangulEnd = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4 + hangulEnd0 = 0xED + hangulEnd1 = 0x9E + hangulEnd2 = 0xA4 + + jamoLBase = 0x1100 // UTF-8(jamoLBase) -> E1 84 00 + jamoLBase0 = 0xE1 + jamoLBase1 = 0x84 + jamoLEnd = 0x1113 + jamoVBase = 0x1161 + jamoVEnd = 0x1176 + jamoTBase = 0x11A7 + jamoTEnd = 0x11C3 + + jamoTCount = 28 + jamoVCount = 21 + jamoVTCount = 21 * 28 + jamoLVTCount = 19 * 21 * 28 +) + +const hangulUTF8Size = 3 + +func isHangul(b []byte) bool { + if len(b) < hangulUTF8Size { + return false + } + b0 := b[0] + if b0 < hangulBase0 { + return false + } + b1 := b[1] + switch { + case b0 == hangulBase0: + return b1 >= hangulBase1 + case b0 < hangulEnd0: + return true + case b0 > hangulEnd0: + return false + case b1 < hangulEnd1: + return true + } + return b1 == hangulEnd1 && b[2] < hangulEnd2 +} + +func isHangulString(b string) bool { + if len(b) < hangulUTF8Size { + return false + } + b0 := b[0] + if b0 < hangulBase0 { + return false + } + b1 := b[1] + switch { + case b0 == hangulBase0: + return b1 >= hangulBase1 + case b0 < hangulEnd0: + return true + case b0 > hangulEnd0: + return false + case b1 < hangulEnd1: + return true + } + return b1 == hangulEnd1 && b[2] < hangulEnd2 +} + +// Caller must ensure len(b) >= 2. +func isJamoVT(b []byte) bool { + // True if (rune & 0xff00) == jamoLBase + return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1 +} + +func isHangulWithoutJamoT(b []byte) bool { + c, _ := utf8.DecodeRune(b) + c -= hangulBase + return c < jamoLVTCount && c%jamoTCount == 0 +} + +// decomposeHangul writes the decomposed Hangul to buf and returns the number +// of bytes written. len(buf) should be at least 9. +func decomposeHangul(buf []byte, r rune) int { + const JamoUTF8Len = 3 + r -= hangulBase + x := r % jamoTCount + r /= jamoTCount + utf8.EncodeRune(buf, jamoLBase+r/jamoVCount) + utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount) + if x != 0 { + utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x) + return 3 * JamoUTF8Len + } + return 2 * JamoUTF8Len +} + +// decomposeHangul algorithmically decomposes a Hangul rune into +// its Jamo components. +// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul. +func (rb *reorderBuffer) decomposeHangul(r rune) { + r -= hangulBase + x := r % jamoTCount + r /= jamoTCount + rb.appendRune(jamoLBase + r/jamoVCount) + rb.appendRune(jamoVBase + r%jamoVCount) + if x != 0 { + rb.appendRune(jamoTBase + x) + } +} + +// combineHangul algorithmically combines Jamo character components into Hangul. +// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul. 
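+// A worked instance (editor's note, not from the upstream comment):
+// combining l = U+1100 with v = U+1161 yields
+// hangulBase + 0*jamoVTCount + 0*jamoTCount = U+AC00, and combining that LV
+// syllable with t = U+11A8 yields 0xAC00 + (0x11A8 - jamoTBase) = U+AC01.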
+func (rb *reorderBuffer) combineHangul(s, i, k int) {
+	b := rb.rune[:]
+	bn := rb.nrune
+	for ; i < bn; i++ {
+		cccB := b[k-1].ccc
+		cccC := b[i].ccc
+		if cccB == 0 {
+			s = k - 1
+		}
+		if s != k-1 && cccB >= cccC {
+			// b[i] is blocked by greater-equal cccX below it
+			b[k] = b[i]
+			k++
+		} else {
+			l := rb.runeAt(s) // also used to compare to hangulBase
+			v := rb.runeAt(i) // also used to compare to jamoT
+			switch {
+			case jamoLBase <= l && l < jamoLEnd &&
+				jamoVBase <= v && v < jamoVEnd:
+				// 11xx plus 116x to LV
+				rb.assignRune(s, hangulBase+
+					(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
+			case hangulBase <= l && l < hangulEnd &&
+				jamoTBase < v && v < jamoTEnd &&
+				((l-hangulBase)%jamoTCount) == 0:
+				// ACxx plus 11Ax to LVT
+				rb.assignRune(s, l+v-jamoTBase)
+			default:
+				b[k] = b[i]
+				k++
+			}
+		}
+	}
+	rb.nrune = k
+}
+
+// compose recombines the runes in the buffer.
+// It should only be used to recompose a single segment, as it will not
+// handle alternations between Hangul and non-Hangul characters correctly.
+func (rb *reorderBuffer) compose() {
+	// UAX #15, section X5, including Corrigendum #5
+	// "In any character sequence beginning with starter S, a character C is
+	//  blocked from S if and only if there is some character B between S
+	//  and C, and either B is a starter or it has the same or higher
+	//  combining class as C."
+	bn := rb.nrune
+	if bn == 0 {
+		return
+	}
+	k := 1
+	b := rb.rune[:]
+	for s, i := 0, 1; i < bn; i++ {
+		if isJamoVT(rb.bytesAt(i)) {
+			// Redo from start in Hangul mode. Necessary to support
+			// U+320E..U+321E in NFKC mode.
+			rb.combineHangul(s, i, k)
+			return
+		}
+		ii := b[i]
+		// We can only use combineForward as a filter if we later
+		// get the info for the combined character. This is more
+		// expensive than using the filter. Using combinesBackward()
+		// is safe.
+		if ii.combinesBackward() {
+			cccB := b[k-1].ccc
+			cccC := ii.ccc
+			blocked := false // b[i] blocked by starter or greater or equal CCC?
+			if cccB == 0 {
+				s = k - 1
+			} else {
+				blocked = s != k-1 && cccB >= cccC
+			}
+			if !blocked {
+				combined := combine(rb.runeAt(s), rb.runeAt(i))
+				if combined != 0 {
+					rb.assignRune(s, combined)
+					continue
+				}
+			}
+		}
+		b[k] = b[i]
+		k++
+	}
+	rb.nrune = k
+}
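+
+// decomposeHangulExample is an editor's sketch (not part of the upstream
+// package) of the Hangul arithmetic above, worked through for one syllable
+// using the Jamo constants defined earlier in this file.
+func decomposeHangulExample() (l, v, t rune) {
+	r := rune(0xD55C) - hangulBase // U+D55C decomposes algorithmically
+	t = jamoTBase + r%jamoTCount   // 0x11A7 + 4 = U+11AB (trailing consonant)
+	r /= jamoTCount
+	l = jamoLBase + r/jamoVCount // U+1112 (leading consonant)
+	v = jamoVBase + r%jamoVCount // U+1161 (vowel)
+	return l, v, t
+}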
* [ []] +// The header contains the number of bytes in the decomposition (excluding this +// length byte). The two most significant bits of this length byte correspond +// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1. +// The byte sequence is followed by a trailing and leading CCC if the values +// for these are not zero. The value of v determines which ccc are appended +// to the sequences. For v < firstCCC, there are none, for v >= firstCCC, +// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC +// there is an additional leading ccc. The value of tccc itself is the +// trailing CCC shifted left 2 bits. The two least-significant bits of tccc +// are the number of trailing non-starters. + +const ( + qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo + headerLenMask = 0x3F // extract the length value from the header byte + headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte +) + +// Properties provides access to normalization properties of a rune. +type Properties struct { + pos uint8 // start position in reorderBuffer; used in composition.go + size uint8 // length of UTF-8 encoding of this rune + ccc uint8 // leading canonical combining class (ccc if not decomposition) + tccc uint8 // trailing canonical combining class (ccc if not decomposition) + nLead uint8 // number of leading non-starters. + flags qcInfo // quick check flags + index uint16 +} + +// functions dispatchable per form +type lookupFunc func(b input, i int) Properties + +// formInfo holds Form-specific functions and tables. +type formInfo struct { + form Form + composing, compatibility bool // form type + info lookupFunc + nextMain iterFunc +} + +var formTable = []*formInfo{{ + form: NFC, + composing: true, + compatibility: false, + info: lookupInfoNFC, + nextMain: nextComposed, +}, { + form: NFD, + composing: false, + compatibility: false, + info: lookupInfoNFC, + nextMain: nextDecomposed, +}, { + form: NFKC, + composing: true, + compatibility: true, + info: lookupInfoNFKC, + nextMain: nextComposed, +}, { + form: NFKD, + composing: false, + compatibility: true, + info: lookupInfoNFKC, + nextMain: nextDecomposed, +}} + +// We do not distinguish between boundaries for NFC, NFD, etc. to avoid +// unexpected behavior for the user. For example, in NFD, there is a boundary +// after 'a'. However, 'a' might combine with modifiers, so from the application's +// perspective it is not a good boundary. We will therefore always use the +// boundaries for the combining variants. + +// BoundaryBefore returns true if this rune starts a new segment and +// cannot combine with any rune on the left. +func (p Properties) BoundaryBefore() bool { + if p.ccc == 0 && !p.combinesBackward() { + return true + } + // We assume that the CCC of the first character in a decomposition + // is always non-zero if different from info.ccc and that we can return + // false at this point. This is verified by maketables. + return false +} + +// BoundaryAfter returns true if runes cannot combine with or otherwise +// interact with this or previous runes. +func (p Properties) BoundaryAfter() bool { + // TODO: loosen these conditions. + return p.isInert() +} + +// We pack quick check data in 4 bits: +// 5: Combines forward (0 == false, 1 == true) +// 4..3: NFC_QC Yes(00), No (10), or Maybe (11) +// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition. +// 1..0: Number of trailing non-starters. 
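+//
+// For example, under this scheme U+0301 (combining acute accent) packs as
+// 0b011001: NFC_QC Maybe (hence combinesBackward), NFD_QC Yes, and one
+// trailing non-starter (the rune itself).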
+// +// When all 4 bits are zero, the character is inert, meaning it is never +// influenced by normalization. +type qcInfo uint8 + +func (p Properties) isYesC() bool { return p.flags&0x10 == 0 } +func (p Properties) isYesD() bool { return p.flags&0x4 == 0 } + +func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 } +func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe +func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD + +func (p Properties) isInert() bool { + return p.flags&qcInfoMask == 0 && p.ccc == 0 +} + +func (p Properties) multiSegment() bool { + return p.index >= firstMulti && p.index < endMulti +} + +func (p Properties) nLeadingNonStarters() uint8 { + return p.nLead +} + +func (p Properties) nTrailingNonStarters() uint8 { + return uint8(p.flags & 0x03) +} + +// Decomposition returns the decomposition for the underlying rune +// or nil if there is none. +func (p Properties) Decomposition() []byte { + // TODO: create the decomposition for Hangul? + if p.index == 0 { + return nil + } + i := p.index + n := decomps[i] & headerLenMask + i++ + return decomps[i : i+uint16(n)] +} + +// Size returns the length of UTF-8 encoding of the rune. +func (p Properties) Size() int { + return int(p.size) +} + +// CCC returns the canonical combining class of the underlying rune. +func (p Properties) CCC() uint8 { + if p.index >= firstCCCZeroExcept { + return 0 + } + return ccc[p.ccc] +} + +// LeadCCC returns the CCC of the first rune in the decomposition. +// If there is no decomposition, LeadCCC equals CCC. +func (p Properties) LeadCCC() uint8 { + return ccc[p.ccc] +} + +// TrailCCC returns the CCC of the last rune in the decomposition. +// If there is no decomposition, TrailCCC equals CCC. +func (p Properties) TrailCCC() uint8 { + return ccc[p.tccc] +} + +// Recomposition +// We use 32-bit keys instead of 64-bit for the two codepoint keys. +// This clips off the bits of three entries, but we know this will not +// result in a collision. In the unlikely event that changes to +// UnicodeData.txt introduce collisions, the compiler will catch it. +// Note that the recomposition map for NFC and NFKC are identical. + +// combine returns the combined rune or 0 if it doesn't exist. +func combine(a, b rune) rune { + key := uint32(uint16(a))<<16 + uint32(uint16(b)) + return recompMap[key] +} + +func lookupInfoNFC(b input, i int) Properties { + v, sz := b.charinfoNFC(i) + return compInfo(v, sz) +} + +func lookupInfoNFKC(b input, i int) Properties { + v, sz := b.charinfoNFKC(i) + return compInfo(v, sz) +} + +// Properties returns properties for the first rune in s. +func (f Form) Properties(s []byte) Properties { + if f == NFC || f == NFD { + return compInfo(nfcData.lookup(s)) + } + return compInfo(nfkcData.lookup(s)) +} + +// PropertiesString returns properties for the first rune in s. +func (f Form) PropertiesString(s string) Properties { + if f == NFC || f == NFD { + return compInfo(nfcData.lookupString(s)) + } + return compInfo(nfkcData.lookupString(s)) +} + +// compInfo converts the information contained in v and sz +// to a Properties. See the comment at the top of the file +// for more information on the format. 
+func compInfo(v uint16, sz int) Properties { + if v == 0 { + return Properties{size: uint8(sz)} + } else if v >= 0x8000 { + p := Properties{ + size: uint8(sz), + ccc: uint8(v), + tccc: uint8(v), + flags: qcInfo(v >> 8), + } + if p.ccc > 0 || p.combinesBackward() { + p.nLead = uint8(p.flags & 0x3) + } + return p + } + // has decomposition + h := decomps[v] + f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4 + p := Properties{size: uint8(sz), flags: f, index: v} + if v >= firstCCC { + v += uint16(h&headerLenMask) + 1 + c := decomps[v] + p.tccc = c >> 2 + p.flags |= qcInfo(c & 0x3) + if v >= firstLeadingCCC { + p.nLead = c & 0x3 + if v >= firstStarterWithNLead { + // We were tricked. Remove the decomposition. + p.flags &= 0x03 + p.index = 0 + return p + } + p.ccc = decomps[v+1] + } + } + return p +} diff --git a/vendor/golang.org/x/text/unicode/norm/input.go b/vendor/golang.org/x/text/unicode/norm/input.go new file mode 100644 index 00000000000..479e35bc258 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/input.go @@ -0,0 +1,109 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import "unicode/utf8" + +type input struct { + str string + bytes []byte +} + +func inputBytes(str []byte) input { + return input{bytes: str} +} + +func inputString(str string) input { + return input{str: str} +} + +func (in *input) setBytes(str []byte) { + in.str = "" + in.bytes = str +} + +func (in *input) setString(str string) { + in.str = str + in.bytes = nil +} + +func (in *input) _byte(p int) byte { + if in.bytes == nil { + return in.str[p] + } + return in.bytes[p] +} + +func (in *input) skipASCII(p, max int) int { + if in.bytes == nil { + for ; p < max && in.str[p] < utf8.RuneSelf; p++ { + } + } else { + for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ { + } + } + return p +} + +func (in *input) skipContinuationBytes(p int) int { + if in.bytes == nil { + for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ { + } + } else { + for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ { + } + } + return p +} + +func (in *input) appendSlice(buf []byte, b, e int) []byte { + if in.bytes != nil { + return append(buf, in.bytes[b:e]...) + } + for i := b; i < e; i++ { + buf = append(buf, in.str[i]) + } + return buf +} + +func (in *input) copySlice(buf []byte, b, e int) int { + if in.bytes == nil { + return copy(buf, in.str[b:e]) + } + return copy(buf, in.bytes[b:e]) +} + +func (in *input) charinfoNFC(p int) (uint16, int) { + if in.bytes == nil { + return nfcData.lookupString(in.str[p:]) + } + return nfcData.lookup(in.bytes[p:]) +} + +func (in *input) charinfoNFKC(p int) (uint16, int) { + if in.bytes == nil { + return nfkcData.lookupString(in.str[p:]) + } + return nfkcData.lookup(in.bytes[p:]) +} + +func (in *input) hangul(p int) (r rune) { + var size int + if in.bytes == nil { + if !isHangulString(in.str[p:]) { + return 0 + } + r, size = utf8.DecodeRuneInString(in.str[p:]) + } else { + if !isHangul(in.bytes[p:]) { + return 0 + } + r, size = utf8.DecodeRune(in.bytes[p:]) + } + if size != hangulUTF8Size { + return 0 + } + return r +} diff --git a/vendor/golang.org/x/text/unicode/norm/iter.go b/vendor/golang.org/x/text/unicode/norm/iter.go new file mode 100644 index 00000000000..ce17f96c2e0 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/iter.go @@ -0,0 +1,457 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import ( + "fmt" + "unicode/utf8" +) + +// MaxSegmentSize is the maximum size of a byte buffer needed to consider any +// sequence of starter and non-starter runes for the purpose of normalization. +const MaxSegmentSize = maxByteBufferSize + +// An Iter iterates over a string or byte slice, while normalizing it +// to a given Form. +type Iter struct { + rb reorderBuffer + buf [maxByteBufferSize]byte + info Properties // first character saved from previous iteration + next iterFunc // implementation of next depends on form + asciiF iterFunc + + p int // current position in input source + multiSeg []byte // remainder of multi-segment decomposition +} + +type iterFunc func(*Iter) []byte + +// Init initializes i to iterate over src after normalizing it to Form f. +func (i *Iter) Init(f Form, src []byte) { + i.p = 0 + if len(src) == 0 { + i.setDone() + i.rb.nsrc = 0 + return + } + i.multiSeg = nil + i.rb.init(f, src) + i.next = i.rb.f.nextMain + i.asciiF = nextASCIIBytes + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) +} + +// InitString initializes i to iterate over src after normalizing it to Form f. +func (i *Iter) InitString(f Form, src string) { + i.p = 0 + if len(src) == 0 { + i.setDone() + i.rb.nsrc = 0 + return + } + i.multiSeg = nil + i.rb.initString(f, src) + i.next = i.rb.f.nextMain + i.asciiF = nextASCIIString + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) +} + +// Seek sets the segment to be returned by the next call to Next to start +// at position p. It is the responsibility of the caller to set p to the +// start of a segment. +func (i *Iter) Seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case 0: + abs = offset + case 1: + abs = int64(i.p) + offset + case 2: + abs = int64(i.rb.nsrc) + offset + default: + return 0, fmt.Errorf("norm: invalid whence") + } + if abs < 0 { + return 0, fmt.Errorf("norm: negative position") + } + if int(abs) >= i.rb.nsrc { + i.setDone() + return int64(i.p), nil + } + i.p = int(abs) + i.multiSeg = nil + i.next = i.rb.f.nextMain + i.info = i.rb.f.info(i.rb.src, i.p) + i.rb.ss.first(i.info) + return abs, nil +} + +// returnSlice returns a slice of the underlying input type as a byte slice. +// If the underlying is of type []byte, it will simply return a slice. +// If the underlying is of type string, it will copy the slice to the buffer +// and return that. +func (i *Iter) returnSlice(a, b int) []byte { + if i.rb.src.bytes == nil { + return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])] + } + return i.rb.src.bytes[a:b] +} + +// Pos returns the byte position at which the next call to Next will commence processing. +func (i *Iter) Pos() int { + return i.p +} + +func (i *Iter) setDone() { + i.next = nextDone + i.p = i.rb.nsrc +} + +// Done returns true if there is no more input to process. +func (i *Iter) Done() bool { + return i.p >= i.rb.nsrc +} + +// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input. +// For any input a and b for which f(a) == f(b), subsequent calls +// to Next will return the same segments. +// Modifying runes are grouped together with the preceding starter, if such a starter exists. +// Although not guaranteed, n will typically be the smallest possible n. 
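+//
+// A typical iteration loop (an illustrative sketch; process is a placeholder):
+//
+//	var it Iter
+//	it.InitString(NFC, s)
+//	for !it.Done() {
+//		process(it.Next())
+//	}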
+func (i *Iter) Next() []byte { + return i.next(i) +} + +func nextASCIIBytes(i *Iter) []byte { + p := i.p + 1 + if p >= i.rb.nsrc { + i.setDone() + return i.rb.src.bytes[i.p:p] + } + if i.rb.src.bytes[p] < utf8.RuneSelf { + p0 := i.p + i.p = p + return i.rb.src.bytes[p0:p] + } + i.info = i.rb.f.info(i.rb.src, i.p) + i.next = i.rb.f.nextMain + return i.next(i) +} + +func nextASCIIString(i *Iter) []byte { + p := i.p + 1 + if p >= i.rb.nsrc { + i.buf[0] = i.rb.src.str[i.p] + i.setDone() + return i.buf[:1] + } + if i.rb.src.str[p] < utf8.RuneSelf { + i.buf[0] = i.rb.src.str[i.p] + i.p = p + return i.buf[:1] + } + i.info = i.rb.f.info(i.rb.src, i.p) + i.next = i.rb.f.nextMain + return i.next(i) +} + +func nextHangul(i *Iter) []byte { + p := i.p + next := p + hangulUTF8Size + if next >= i.rb.nsrc { + i.setDone() + } else if i.rb.src.hangul(next) == 0 { + i.rb.ss.next(i.info) + i.info = i.rb.f.info(i.rb.src, i.p) + i.next = i.rb.f.nextMain + return i.next(i) + } + i.p = next + return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))] +} + +func nextDone(i *Iter) []byte { + return nil +} + +// nextMulti is used for iterating over multi-segment decompositions +// for decomposing normal forms. +func nextMulti(i *Iter) []byte { + j := 0 + d := i.multiSeg + // skip first rune + for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ { + } + for j < len(d) { + info := i.rb.f.info(input{bytes: d}, j) + if info.BoundaryBefore() { + i.multiSeg = d[j:] + return d[:j] + } + j += int(info.size) + } + // treat last segment as normal decomposition + i.next = i.rb.f.nextMain + return i.next(i) +} + +// nextMultiNorm is used for iterating over multi-segment decompositions +// for composing normal forms. +func nextMultiNorm(i *Iter) []byte { + j := 0 + d := i.multiSeg + for j < len(d) { + info := i.rb.f.info(input{bytes: d}, j) + if info.BoundaryBefore() { + i.rb.compose() + seg := i.buf[:i.rb.flushCopy(i.buf[:])] + i.rb.insertUnsafe(input{bytes: d}, j, info) + i.multiSeg = d[j+int(info.size):] + return seg + } + i.rb.insertUnsafe(input{bytes: d}, j, info) + j += int(info.size) + } + i.multiSeg = nil + i.next = nextComposed + return doNormComposed(i) +} + +// nextDecomposed is the implementation of Next for forms NFD and NFKD. +func nextDecomposed(i *Iter) (next []byte) { + outp := 0 + inCopyStart, outCopyStart := i.p, 0 + for { + if sz := int(i.info.size); sz <= 1 { + i.rb.ss = 0 + p := i.p + i.p++ // ASCII or illegal byte. Either way, advance by 1. + if i.p >= i.rb.nsrc { + i.setDone() + return i.returnSlice(p, i.p) + } else if i.rb.src._byte(i.p) < utf8.RuneSelf { + i.next = i.asciiF + return i.returnSlice(p, i.p) + } + outp++ + } else if d := i.info.Decomposition(); d != nil { + // Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero. + // Case 1: there is a leftover to copy. In this case the decomposition + // must begin with a modifier and should always be appended. + // Case 2: no leftover. Simply return d if followed by a ccc == 0 value. + p := outp + len(d) + if outp > 0 { + i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + // TODO: this condition should not be possible, but we leave it + // in for defensive purposes. + if p > len(i.buf) { + return i.buf[:outp] + } + } else if i.info.multiSegment() { + // outp must be 0 as multi-segment decompositions always + // start a new segment. + if i.multiSeg == nil { + i.multiSeg = d + i.next = nextMulti + return nextMulti(i) + } + // We are in the last segment. Treat as normal decomposition. 
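+			// (i.multiSeg still holds the tail that nextMulti left behind;
+			// from here on it is handled like an ordinary decomposition.)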
+ d = i.multiSeg + i.multiSeg = nil + p = len(d) + } + prevCC := i.info.tccc + if i.p += sz; i.p >= i.rb.nsrc { + i.setDone() + i.info = Properties{} // Force BoundaryBefore to succeed. + } else { + i.info = i.rb.f.info(i.rb.src, i.p) + } + switch i.rb.ss.next(i.info) { + case ssOverflow: + i.next = nextCGJDecompose + fallthrough + case ssStarter: + if outp > 0 { + copy(i.buf[outp:], d) + return i.buf[:p] + } + return d + } + copy(i.buf[outp:], d) + outp = p + inCopyStart, outCopyStart = i.p, outp + if i.info.ccc < prevCC { + goto doNorm + } + continue + } else if r := i.rb.src.hangul(i.p); r != 0 { + outp = decomposeHangul(i.buf[:], r) + i.p += hangulUTF8Size + inCopyStart, outCopyStart = i.p, outp + if i.p >= i.rb.nsrc { + i.setDone() + break + } else if i.rb.src.hangul(i.p) != 0 { + i.next = nextHangul + return i.buf[:outp] + } + } else { + p := outp + sz + if p > len(i.buf) { + break + } + outp = p + i.p += sz + } + if i.p >= i.rb.nsrc { + i.setDone() + break + } + prevCC := i.info.tccc + i.info = i.rb.f.info(i.rb.src, i.p) + if v := i.rb.ss.next(i.info); v == ssStarter { + break + } else if v == ssOverflow { + i.next = nextCGJDecompose + break + } + if i.info.ccc < prevCC { + goto doNorm + } + } + if outCopyStart == 0 { + return i.returnSlice(inCopyStart, i.p) + } else if inCopyStart < i.p { + i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + } + return i.buf[:outp] +doNorm: + // Insert what we have decomposed so far in the reorderBuffer. + // As we will only reorder, there will always be enough room. + i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p) + i.rb.insertDecomposed(i.buf[0:outp]) + return doNormDecomposed(i) +} + +func doNormDecomposed(i *Iter) []byte { + for { + i.rb.insertUnsafe(i.rb.src, i.p, i.info) + if i.p += int(i.info.size); i.p >= i.rb.nsrc { + i.setDone() + break + } + i.info = i.rb.f.info(i.rb.src, i.p) + if i.info.ccc == 0 { + break + } + if s := i.rb.ss.next(i.info); s == ssOverflow { + i.next = nextCGJDecompose + break + } + } + // new segment or too many combining characters: exit normalization + return i.buf[:i.rb.flushCopy(i.buf[:])] +} + +func nextCGJDecompose(i *Iter) []byte { + i.rb.ss = 0 + i.rb.insertCGJ() + i.next = nextDecomposed + i.rb.ss.first(i.info) + buf := doNormDecomposed(i) + return buf +} + +// nextComposed is the implementation of Next for forms NFC and NFKC. 
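+// It passes through maximal runs of input whose runes have quick-check
+// flag Yes and non-decreasing combining classes, and falls back to the
+// reorder buffer (doNorm below) only when a rune may need to compose or
+// reorder.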
+func nextComposed(i *Iter) []byte {
+	outp, startp := 0, i.p
+	var prevCC uint8
+	for {
+		if !i.info.isYesC() {
+			goto doNorm
+		}
+		prevCC = i.info.tccc
+		sz := int(i.info.size)
+		if sz == 0 {
+			sz = 1 // illegal rune: copy byte-by-byte
+		}
+		p := outp + sz
+		if p > len(i.buf) {
+			break
+		}
+		outp = p
+		i.p += sz
+		if i.p >= i.rb.nsrc {
+			i.setDone()
+			break
+		} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
+			i.rb.ss = 0
+			i.next = i.asciiF
+			break
+		}
+		i.info = i.rb.f.info(i.rb.src, i.p)
+		if v := i.rb.ss.next(i.info); v == ssStarter {
+			break
+		} else if v == ssOverflow {
+			i.next = nextCGJCompose
+			break
+		}
+		if i.info.ccc < prevCC {
+			goto doNorm
+		}
+	}
+	return i.returnSlice(startp, i.p)
+doNorm:
+	// reset to start position
+	i.p = startp
+	i.info = i.rb.f.info(i.rb.src, i.p)
+	i.rb.ss.first(i.info)
+	if i.info.multiSegment() {
+		d := i.info.Decomposition()
+		info := i.rb.f.info(input{bytes: d}, 0)
+		i.rb.insertUnsafe(input{bytes: d}, 0, info)
+		i.multiSeg = d[int(info.size):]
+		i.next = nextMultiNorm
+		return nextMultiNorm(i)
+	}
+	i.rb.ss.first(i.info)
+	i.rb.insertUnsafe(i.rb.src, i.p, i.info)
+	return doNormComposed(i)
+}
+
+func doNormComposed(i *Iter) []byte {
+	// First rune should already be inserted.
+	for {
+		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
+			i.setDone()
+			break
+		}
+		i.info = i.rb.f.info(i.rb.src, i.p)
+		if s := i.rb.ss.next(i.info); s == ssStarter {
+			break
+		} else if s == ssOverflow {
+			i.next = nextCGJCompose
+			break
+		}
+		i.rb.insertUnsafe(i.rb.src, i.p, i.info)
+	}
+	i.rb.compose()
+	seg := i.buf[:i.rb.flushCopy(i.buf[:])]
+	return seg
+}
+
+func nextCGJCompose(i *Iter) []byte {
+	i.rb.ss = 0 // instead of first
+	i.rb.insertCGJ()
+	i.next = nextComposed
+	// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
+	// even if it is not. This is particularly dubious for U+FF9E and U+FF9A.
+	// If we ever change that, insert a check here.
+	i.rb.ss.first(i.info)
+	i.rb.insertUnsafe(i.rb.src, i.p, i.info)
+	return doNormComposed(i)
+}
diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go
new file mode 100644
index 00000000000..e28ac641aca
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/normalize.go
@@ -0,0 +1,609 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Note: the file data_test.go that is generated should not be checked in.
+//go:generate go run maketables.go triegen.go
+//go:generate go test -tags test
+
+// Package norm contains types and functions for normalizing Unicode strings.
+package norm // import "golang.org/x/text/unicode/norm"
+
+import (
+	"unicode/utf8"
+
+	"golang.org/x/text/transform"
+)
+
+// A Form denotes a canonical representation of Unicode code points.
+// The Unicode-defined normalization and equivalence forms are:
+//
+//	NFC   Unicode Normalization Form C
+//	NFD   Unicode Normalization Form D
+//	NFKC  Unicode Normalization Form KC
+//	NFKD  Unicode Normalization Form KD
+//
+// For a Form f, this documentation uses the notation f(x) to mean
+// the bytes or string x converted to the given form.
+// A position n in x is called a boundary if conversion to the form can
+// proceed independently on both sides:
+//	f(x) == append(f(x[0:n]), f(x[n:])...)
+//
+// References: http://unicode.org/reports/tr15/ and
+// http://unicode.org/notes/tn5/.
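+//
+// For example (illustrative):
+//
+//	NFC.String("e\u0301") // "é": U+0065 U+0301 compose to U+00E9
+//	NFD.String("é")       // "e\u0301": U+00E9 decomposes into U+0065 U+0301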
+type Form int + +const ( + NFC Form = iota + NFD + NFKC + NFKD +) + +// Bytes returns f(b). May return b if f(b) = b. +func (f Form) Bytes(b []byte) []byte { + src := inputBytes(b) + ft := formTable[f] + n, ok := ft.quickSpan(src, 0, len(b), true) + if ok { + return b + } + out := make([]byte, n, len(b)) + copy(out, b[0:n]) + rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush} + return doAppendInner(&rb, n) +} + +// String returns f(s). +func (f Form) String(s string) string { + src := inputString(s) + ft := formTable[f] + n, ok := ft.quickSpan(src, 0, len(s), true) + if ok { + return s + } + out := make([]byte, n, len(s)) + copy(out, s[0:n]) + rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush} + return string(doAppendInner(&rb, n)) +} + +// IsNormal returns true if b == f(b). +func (f Form) IsNormal(b []byte) bool { + src := inputBytes(b) + ft := formTable[f] + bp, ok := ft.quickSpan(src, 0, len(b), true) + if ok { + return true + } + rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)} + rb.setFlusher(nil, cmpNormalBytes) + for bp < len(b) { + rb.out = b[bp:] + if bp = decomposeSegment(&rb, bp, true); bp < 0 { + return false + } + bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true) + } + return true +} + +func cmpNormalBytes(rb *reorderBuffer) bool { + b := rb.out + for i := 0; i < rb.nrune; i++ { + info := rb.rune[i] + if int(info.size) > len(b) { + return false + } + p := info.pos + pe := p + info.size + for ; p < pe; p++ { + if b[0] != rb.byte[p] { + return false + } + b = b[1:] + } + } + return true +} + +// IsNormalString returns true if s == f(s). +func (f Form) IsNormalString(s string) bool { + src := inputString(s) + ft := formTable[f] + bp, ok := ft.quickSpan(src, 0, len(s), true) + if ok { + return true + } + rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)} + rb.setFlusher(nil, func(rb *reorderBuffer) bool { + for i := 0; i < rb.nrune; i++ { + info := rb.rune[i] + if bp+int(info.size) > len(s) { + return false + } + p := info.pos + pe := p + info.size + for ; p < pe; p++ { + if s[bp] != rb.byte[p] { + return false + } + bp++ + } + } + return true + }) + for bp < len(s) { + if bp = decomposeSegment(&rb, bp, true); bp < 0 { + return false + } + bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true) + } + return true +} + +// patchTail fixes a case where a rune may be incorrectly normalized +// if it is followed by illegal continuation bytes. It returns the +// patched buffer and whether the decomposition is still in progress. +func patchTail(rb *reorderBuffer) bool { + info, p := lastRuneStart(&rb.f, rb.out) + if p == -1 || info.size == 0 { + return true + } + end := p + int(info.size) + extra := len(rb.out) - end + if extra > 0 { + // Potentially allocating memory. However, this only + // happens with ill-formed UTF-8. + x := make([]byte, 0) + x = append(x, rb.out[len(rb.out)-extra:]...) + rb.out = rb.out[:end] + decomposeToLastBoundary(rb) + rb.doFlush() + rb.out = append(rb.out, x...) 
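+		// The ill-formed tail bytes were copied through unchanged, so no
+		// decomposition remains in progress.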
+ return false + } + buf := rb.out[p:] + rb.out = rb.out[:p] + decomposeToLastBoundary(rb) + if s := rb.ss.next(info); s == ssStarter { + rb.doFlush() + rb.ss.first(info) + } else if s == ssOverflow { + rb.doFlush() + rb.insertCGJ() + rb.ss = 0 + } + rb.insertUnsafe(inputBytes(buf), 0, info) + return true +} + +func appendQuick(rb *reorderBuffer, i int) int { + if rb.nsrc == i { + return i + } + end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true) + rb.out = rb.src.appendSlice(rb.out, i, end) + return end +} + +// Append returns f(append(out, b...)). +// The buffer out must be nil, empty, or equal to f(out). +func (f Form) Append(out []byte, src ...byte) []byte { + return f.doAppend(out, inputBytes(src), len(src)) +} + +func (f Form) doAppend(out []byte, src input, n int) []byte { + if n == 0 { + return out + } + ft := formTable[f] + // Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer. + if len(out) == 0 { + p, _ := ft.quickSpan(src, 0, n, true) + out = src.appendSlice(out, 0, p) + if p == n { + return out + } + rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush} + return doAppendInner(&rb, p) + } + rb := reorderBuffer{f: *ft, src: src, nsrc: n} + return doAppend(&rb, out, 0) +} + +func doAppend(rb *reorderBuffer, out []byte, p int) []byte { + rb.setFlusher(out, appendFlush) + src, n := rb.src, rb.nsrc + doMerge := len(out) > 0 + if q := src.skipContinuationBytes(p); q > p { + // Move leading non-starters to destination. + rb.out = src.appendSlice(rb.out, p, q) + p = q + doMerge = patchTail(rb) + } + fd := &rb.f + if doMerge { + var info Properties + if p < n { + info = fd.info(src, p) + if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 { + if p == 0 { + decomposeToLastBoundary(rb) + } + p = decomposeSegment(rb, p, true) + } + } + if info.size == 0 { + rb.doFlush() + // Append incomplete UTF-8 encoding. + return src.appendSlice(rb.out, p, n) + } + if rb.nrune > 0 { + return doAppendInner(rb, p) + } + } + p = appendQuick(rb, p) + return doAppendInner(rb, p) +} + +func doAppendInner(rb *reorderBuffer, p int) []byte { + for n := rb.nsrc; p < n; { + p = decomposeSegment(rb, p, true) + p = appendQuick(rb, p) + } + return rb.out +} + +// AppendString returns f(append(out, []byte(s))). +// The buffer out must be nil, empty, or equal to f(out). +func (f Form) AppendString(out []byte, src string) []byte { + return f.doAppend(out, inputString(src), len(src)) +} + +// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]). +// It is not guaranteed to return the largest such n. +func (f Form) QuickSpan(b []byte) int { + n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true) + return n +} + +// Span implements transform.SpanningTransformer. It returns a boundary n such +// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n. +func (f Form) Span(b []byte, atEOF bool) (n int, err error) { + n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF) + if n < len(b) { + if !ok { + err = transform.ErrEndOfSpan + } else { + err = transform.ErrShortSrc + } + } + return n, err +} + +// SpanString returns a boundary n such that s[0:n] == f(s[0:n]). +// It is not guaranteed to return the largest such n. 
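+//
+// For example (illustrative), NFC.SpanString("abc", true) returns 3 with a nil
+// error, while NFC.SpanString("e\u0301", true) returns 0 with
+// transform.ErrEndOfSpan: the segment may still compose to "é".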
+func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
+	n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
+	if n < len(s) {
+		if !ok {
+			err = transform.ErrEndOfSpan
+		} else {
+			err = transform.ErrShortSrc
+		}
+	}
+	return n, err
+}
+
+// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
+// whether any non-normalized parts were found. If atEOF is false, n will
+// not point past the last segment if this segment might become
+// non-normalized by appending other runes.
+func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
+	var lastCC uint8
+	ss := streamSafe(0)
+	lastSegStart := i
+	for n = end; i < n; {
+		if j := src.skipASCII(i, n); i != j {
+			i = j
+			lastSegStart = i - 1
+			lastCC = 0
+			ss = 0
+			continue
+		}
+		info := f.info(src, i)
+		if info.size == 0 {
+			if atEOF {
+				// include incomplete runes
+				return n, true
+			}
+			return lastSegStart, true
+		}
+		// This block needs to be before the next, because it is possible to
+		// have an overflow for runes that are starters (e.g. with U+FF9E).
+		switch ss.next(info) {
+		case ssStarter:
+			lastSegStart = i
+		case ssOverflow:
+			return lastSegStart, false
+		case ssSuccess:
+			if lastCC > info.ccc {
+				return lastSegStart, false
+			}
+		}
+		if f.composing {
+			if !info.isYesC() {
+				break
+			}
+		} else {
+			if !info.isYesD() {
+				break
+			}
+		}
+		lastCC = info.ccc
+		i += int(info.size)
+	}
+	if i == n {
+		if !atEOF {
+			n = lastSegStart
+		}
+		return n, true
+	}
+	return lastSegStart, false
+}
+
+// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
+// It is not guaranteed to return the largest such n.
+func (f Form) QuickSpanString(s string) int {
+	n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
+	return n
+}
+
+// FirstBoundary returns the position i of the first boundary in b
+// or -1 if b contains no boundary.
+func (f Form) FirstBoundary(b []byte) int {
+	return f.firstBoundary(inputBytes(b), len(b))
+}
+
+func (f Form) firstBoundary(src input, nsrc int) int {
+	i := src.skipContinuationBytes(0)
+	if i >= nsrc {
+		return -1
+	}
+	fd := formTable[f]
+	ss := streamSafe(0)
+	// We should call ss.first here, but we can't as the first rune is
+	// skipped already. This means FirstBoundary can't really determine
+	// CGJ insertion points correctly. Luckily it doesn't have to.
+	for {
+		info := fd.info(src, i)
+		if info.size == 0 {
+			return -1
+		}
+		if s := ss.next(info); s != ssSuccess {
+			return i
+		}
+		i += int(info.size)
+		if i >= nsrc {
+			if !info.BoundaryAfter() && !ss.isMax() {
+				return -1
+			}
+			return nsrc
+		}
+	}
+}
+
+// FirstBoundaryInString returns the position i of the first boundary in s
+// or -1 if s contains no boundary.
+func (f Form) FirstBoundaryInString(s string) int {
+	return f.firstBoundary(inputString(s), len(s))
+}
+
+// NextBoundary reports the index of the boundary between the first and next
+// segment in b or -1 if atEOF is false and there are not enough bytes to
+// determine this boundary.
+func (f Form) NextBoundary(b []byte, atEOF bool) int {
+	return f.nextBoundary(inputBytes(b), len(b), atEOF)
+}
+
+// NextBoundaryInString reports the index of the boundary between the first and
+// next segment in s or -1 if atEOF is false and there are not enough bytes to
+// determine this boundary.
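+//
+// For example (illustrative), NFC.NextBoundaryInString("e\u0301x", true)
+// returns 3: the segment "e\u0301" ends before the starter 'x'.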
+func (f Form) NextBoundaryInString(s string, atEOF bool) int { + return f.nextBoundary(inputString(s), len(s), atEOF) +} + +func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int { + if nsrc == 0 { + if atEOF { + return 0 + } + return -1 + } + fd := formTable[f] + info := fd.info(src, 0) + if info.size == 0 { + if atEOF { + return 1 + } + return -1 + } + ss := streamSafe(0) + ss.first(info) + + for i := int(info.size); i < nsrc; i += int(info.size) { + info = fd.info(src, i) + if info.size == 0 { + if atEOF { + return i + } + return -1 + } + // TODO: Using streamSafe to determine the boundary isn't the same as + // using BoundaryBefore. Determine which should be used. + if s := ss.next(info); s != ssSuccess { + return i + } + } + if !atEOF && !info.BoundaryAfter() && !ss.isMax() { + return -1 + } + return nsrc +} + +// LastBoundary returns the position i of the last boundary in b +// or -1 if b contains no boundary. +func (f Form) LastBoundary(b []byte) int { + return lastBoundary(formTable[f], b) +} + +func lastBoundary(fd *formInfo, b []byte) int { + i := len(b) + info, p := lastRuneStart(fd, b) + if p == -1 { + return -1 + } + if info.size == 0 { // ends with incomplete rune + if p == 0 { // starts with incomplete rune + return -1 + } + i = p + info, p = lastRuneStart(fd, b[:i]) + if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter + return i + } + } + if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8 + return i + } + if info.BoundaryAfter() { + return i + } + ss := streamSafe(0) + v := ss.backwards(info) + for i = p; i >= 0 && v != ssStarter; i = p { + info, p = lastRuneStart(fd, b[:i]) + if v = ss.backwards(info); v == ssOverflow { + break + } + if p+int(info.size) != i { + if p == -1 { // no boundary found + return -1 + } + return i // boundary after an illegal UTF-8 encoding + } + } + return i +} + +// decomposeSegment scans the first segment in src into rb. It inserts 0x034f +// (Grapheme Joiner) when it encounters a sequence of more than 30 non-starters +// and returns the number of bytes consumed from src or iShortDst or iShortSrc. +func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int { + // Force one character to be consumed. + info := rb.f.info(rb.src, sp) + if info.size == 0 { + return 0 + } + if s := rb.ss.next(info); s == ssStarter { + // TODO: this could be removed if we don't support merging. + if rb.nrune > 0 { + goto end + } + } else if s == ssOverflow { + rb.insertCGJ() + goto end + } + if err := rb.insertFlush(rb.src, sp, info); err != iSuccess { + return int(err) + } + for { + sp += int(info.size) + if sp >= rb.nsrc { + if !atEOF && !info.BoundaryAfter() { + return int(iShortSrc) + } + break + } + info = rb.f.info(rb.src, sp) + if info.size == 0 { + if !atEOF { + return int(iShortSrc) + } + break + } + if s := rb.ss.next(info); s == ssStarter { + break + } else if s == ssOverflow { + rb.insertCGJ() + break + } + if err := rb.insertFlush(rb.src, sp, info); err != iSuccess { + return int(err) + } + } +end: + if !rb.doFlush() { + return int(iShortDst) + } + return sp +} + +// lastRuneStart returns the runeInfo and position of the last +// rune in buf or the zero runeInfo and -1 if no rune was found. 
+func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
+	p := len(buf) - 1
+	for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
+	}
+	if p < 0 {
+		return Properties{}, -1
+	}
+	return fd.info(inputBytes(buf), p), p
+}
+
+// decomposeToLastBoundary finds an open segment at the end of the buffer
+// and scans it into rb. Returns the buffer minus the last segment.
+func decomposeToLastBoundary(rb *reorderBuffer) {
+	fd := &rb.f
+	info, i := lastRuneStart(fd, rb.out)
+	if int(info.size) != len(rb.out)-i {
+		// illegal trailing continuation bytes
+		return
+	}
+	if info.BoundaryAfter() {
+		return
+	}
+	var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
+	padd := 0
+	ss := streamSafe(0)
+	p := len(rb.out)
+	for {
+		add[padd] = info
+		v := ss.backwards(info)
+		if v == ssOverflow {
+			// Note that if we have an overflow, the string we are appending to
+			// is not correctly normalized. In this case the behavior is undefined.
+			break
+		}
+		padd++
+		p -= int(info.size)
+		if v == ssStarter || p < 0 {
+			break
+		}
+		info, i = lastRuneStart(fd, rb.out[:p])
+		if int(info.size) != p-i {
+			break
+		}
+	}
+	rb.ss = ss
+	// Copy bytes for insertion as we may need to overwrite rb.out.
+	var buf [maxBufferSize * utf8.UTFMax]byte
+	cp := buf[:copy(buf[:], rb.out[p:])]
+	rb.out = rb.out[:p]
+	for padd--; padd >= 0; padd-- {
+		info = add[padd]
+		rb.insertUnsafe(inputBytes(cp), 0, info)
+		cp = cp[info.size:]
+	}
+}
diff --git a/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/golang.org/x/text/unicode/norm/readwriter.go
new file mode 100644
index 00000000000..d926ee903e5
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/readwriter.go
@@ -0,0 +1,125 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package norm
+
+import "io"
+
+type normWriter struct {
+	rb  reorderBuffer
+	w   io.Writer
+	buf []byte
+}
+
+// Write implements the standard write interface. If the last characters are
+// not at a normalization boundary, the bytes will be buffered for the next
+// write. The remaining bytes will be written on close.
+func (w *normWriter) Write(data []byte) (n int, err error) {
+	// Process data in pieces to keep w.buf size bounded.
+	const chunk = 4000
+
+	for len(data) > 0 {
+		// Normalize into w.buf.
+		m := len(data)
+		if m > chunk {
+			m = chunk
+		}
+		w.rb.src = inputBytes(data[:m])
+		w.rb.nsrc = m
+		w.buf = doAppend(&w.rb, w.buf, 0)
+		data = data[m:]
+		n += m
+
+		// Write out complete prefix, save remainder.
+		// Note that lastBoundary looks back at most 31 runes.
+		i := lastBoundary(&w.rb.f, w.buf)
+		if i == -1 {
+			i = 0
+		}
+		if i > 0 {
+			if _, err = w.w.Write(w.buf[:i]); err != nil {
+				break
+			}
+			bn := copy(w.buf, w.buf[i:])
+			w.buf = w.buf[:bn]
+		}
+	}
+	return n, err
+}
+
+// Close forces data that remains in the buffer to be written.
+func (w *normWriter) Close() error {
+	if len(w.buf) > 0 {
+		_, err := w.w.Write(w.buf)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Writer returns a new writer that implements Write(b)
+// by writing f(b) to w. The returned writer may use an
+// internal buffer to maintain state across Write calls.
+// Calling its Close method writes any buffered data to w.
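+//
+// Typical use (an illustrative sketch; dst is an io.Writer, src an io.Reader):
+//
+//	w := NFC.Writer(dst)
+//	defer w.Close()
+//	io.Copy(w, src)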
+func (f Form) Writer(w io.Writer) io.WriteCloser { + wr := &normWriter{rb: reorderBuffer{}, w: w} + wr.rb.init(f, nil) + return wr +} + +type normReader struct { + rb reorderBuffer + r io.Reader + inbuf []byte + outbuf []byte + bufStart int + lastBoundary int + err error +} + +// Read implements the standard read interface. +func (r *normReader) Read(p []byte) (int, error) { + for { + if r.lastBoundary-r.bufStart > 0 { + n := copy(p, r.outbuf[r.bufStart:r.lastBoundary]) + r.bufStart += n + if r.lastBoundary-r.bufStart > 0 { + return n, nil + } + return n, r.err + } + if r.err != nil { + return 0, r.err + } + outn := copy(r.outbuf, r.outbuf[r.lastBoundary:]) + r.outbuf = r.outbuf[0:outn] + r.bufStart = 0 + + n, err := r.r.Read(r.inbuf) + r.rb.src = inputBytes(r.inbuf[0:n]) + r.rb.nsrc, r.err = n, err + if n > 0 { + r.outbuf = doAppend(&r.rb, r.outbuf, 0) + } + if err == io.EOF { + r.lastBoundary = len(r.outbuf) + } else { + r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf) + if r.lastBoundary == -1 { + r.lastBoundary = 0 + } + } + } +} + +// Reader returns a new reader that implements Read +// by reading data from r and returning f(data). +func (f Form) Reader(r io.Reader) io.Reader { + const chunk = 4000 + buf := make([]byte, chunk) + rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf} + rr.rb.init(f, buf) + return rr +} diff --git a/vendor/golang.org/x/text/unicode/norm/tables.go b/vendor/golang.org/x/text/unicode/norm/tables.go new file mode 100644 index 00000000000..316b093c53e --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables.go @@ -0,0 +1,7651 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package norm + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "10.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
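+	//
+	// With maxNonStarters = 30 (the stream-safe limit noted in normalize.go),
+	// this comes to 35 + 30*4 = 155 bytes.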
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 
0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 
0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 
0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 
0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 
0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 
0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 
0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 
0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 
0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 
0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 
0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 
0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 
0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 
0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, 
+ 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 
0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 
0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 
0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 
0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, 
+ 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 
0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 
0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 
0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 
0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, 
+ 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 
0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 
0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 
0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 
0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, 
+ 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 
0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, + 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 
0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 
0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 
0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
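+			// Width 2 here (rather than 0, which per the doc comment
+			// signals an incomplete input) reports the malformed pair
+			// as consumed, so the caller can resynchronize at s[2].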
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. 
Total size: 10442 bytes (10.20 KiB). Checksum: 4ba400a9d8208e03. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 45: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 45 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 47 blocks, 3008 entries, 6016 bytes +// The third block is the zero block. +var nfcValues = [3008]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 
0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 
0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xa000, + 0x3c6: 0x2d26, 0x3c7: 0xa000, 0x3c8: 0x2d2e, 0x3c9: 0xa000, 0x3ca: 0x2d36, 0x3cb: 0xa000, + 0x3cc: 0x2d3e, 0x3cd: 0xa000, 0x3ce: 0x2d46, 0x3d1: 0xa000, + 0x3d2: 0x2d4e, + 0x3f4: 0x8102, 0x3f5: 0x9900, + 0x3fa: 0xa000, 0x3fb: 0x2d56, + 0x3fc: 0xa000, 0x3fd: 0x2d5e, 0x3fe: 0xa000, 0x3ff: 0xa000, + // Block 0x10, offset 0x400 + 0x400: 0x8132, 0x401: 0x8132, 0x402: 0x812d, 0x403: 0x8132, 0x404: 0x8132, 0x405: 0x8132, + 0x406: 0x8132, 0x407: 0x8132, 0x408: 0x8132, 0x409: 0x8132, 0x40a: 0x812d, 0x40b: 0x8132, + 0x40c: 0x8132, 0x40d: 0x8135, 0x40e: 0x812a, 0x40f: 0x812d, 0x410: 0x8129, 0x411: 0x8132, 
+ 0x412: 0x8132, 0x413: 0x8132, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x422: 0x8132, 0x423: 0x8132, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x8132, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x8132, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x8132, 0x42e: 0x8132, 0x42f: 0x8132, + 0x430: 0x8132, 0x431: 0x8132, 0x432: 0x8132, 0x433: 0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x8133, 0x437: 0x8131, 0x438: 0x8131, 0x439: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8134, 0x43d: 0x812d, 0x43e: 0x8132, 0x43f: 0x812d, + // Block 0x11, offset 0x440 + 0x440: 0x2f97, 0x441: 0x32a3, 0x442: 0x2fa1, 0x443: 0x32ad, 0x444: 0x2fa6, 0x445: 0x32b2, + 0x446: 0x2fab, 0x447: 0x32b7, 0x448: 0x38cc, 0x449: 0x3a5b, 0x44a: 0x2fc4, 0x44b: 0x32d0, + 0x44c: 0x2fce, 0x44d: 0x32da, 0x44e: 0x2fdd, 0x44f: 0x32e9, 0x450: 0x2fd3, 0x451: 0x32df, + 0x452: 0x2fd8, 0x453: 0x32e4, 0x454: 0x38ef, 0x455: 0x3a7e, 0x456: 0x38f6, 0x457: 0x3a85, + 0x458: 0x3019, 0x459: 0x3325, 0x45a: 0x301e, 0x45b: 0x332a, 0x45c: 0x3904, 0x45d: 0x3a93, + 0x45e: 0x3023, 0x45f: 0x332f, 0x460: 0x3032, 0x461: 0x333e, 0x462: 0x3050, 0x463: 0x335c, + 0x464: 0x305f, 0x465: 0x336b, 0x466: 0x3055, 0x467: 0x3361, 0x468: 0x3064, 0x469: 0x3370, + 0x46a: 0x3069, 0x46b: 0x3375, 0x46c: 0x30af, 0x46d: 0x33bb, 0x46e: 0x390b, 0x46f: 0x3a9a, + 0x470: 0x30b9, 0x471: 0x33ca, 0x472: 0x30c3, 0x473: 0x33d4, 0x474: 0x30cd, 0x475: 0x33de, + 0x476: 0x46c4, 0x477: 0x4755, 0x478: 0x3912, 0x479: 0x3aa1, 0x47a: 0x30e6, 0x47b: 0x33f7, + 0x47c: 0x30e1, 0x47d: 0x33f2, 0x47e: 0x30eb, 0x47f: 0x33fc, + // Block 0x12, offset 0x480 + 0x480: 0x30f0, 0x481: 0x3401, 0x482: 0x30f5, 0x483: 0x3406, 0x484: 0x3109, 0x485: 0x341a, + 0x486: 0x3113, 0x487: 0x3424, 0x488: 0x3122, 0x489: 0x3433, 0x48a: 0x311d, 0x48b: 0x342e, + 0x48c: 0x3935, 0x48d: 0x3ac4, 0x48e: 0x3943, 0x48f: 0x3ad2, 0x490: 0x394a, 0x491: 0x3ad9, + 0x492: 0x3951, 0x493: 0x3ae0, 0x494: 0x314f, 0x495: 0x3460, 0x496: 0x3154, 0x497: 0x3465, + 0x498: 0x315e, 0x499: 0x346f, 0x49a: 0x46f1, 0x49b: 0x4782, 0x49c: 0x3997, 0x49d: 0x3b26, + 0x49e: 0x3177, 0x49f: 0x3488, 0x4a0: 0x3181, 0x4a1: 0x3492, 0x4a2: 0x4700, 0x4a3: 0x4791, + 0x4a4: 0x399e, 0x4a5: 0x3b2d, 0x4a6: 0x39a5, 0x4a7: 0x3b34, 0x4a8: 0x39ac, 0x4a9: 0x3b3b, + 0x4aa: 0x3190, 0x4ab: 0x34a1, 0x4ac: 0x319a, 0x4ad: 0x34b0, 0x4ae: 0x31ae, 0x4af: 0x34c4, + 0x4b0: 0x31a9, 0x4b1: 0x34bf, 0x4b2: 0x31ea, 0x4b3: 0x3500, 0x4b4: 0x31f9, 0x4b5: 0x350f, + 0x4b6: 0x31f4, 0x4b7: 0x350a, 0x4b8: 0x39b3, 0x4b9: 0x3b42, 0x4ba: 0x39ba, 0x4bb: 0x3b49, + 0x4bc: 0x31fe, 0x4bd: 0x3514, 0x4be: 0x3203, 0x4bf: 0x3519, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x3208, 0x4c1: 0x351e, 0x4c2: 0x320d, 0x4c3: 0x3523, 0x4c4: 0x321c, 0x4c5: 0x3532, + 0x4c6: 0x3217, 0x4c7: 0x352d, 0x4c8: 0x3221, 0x4c9: 0x353c, 0x4ca: 0x3226, 0x4cb: 0x3541, + 0x4cc: 0x322b, 0x4cd: 0x3546, 0x4ce: 0x3249, 0x4cf: 0x3564, 0x4d0: 0x3262, 0x4d1: 0x3582, + 0x4d2: 0x3271, 0x4d3: 0x3591, 0x4d4: 0x3276, 0x4d5: 0x3596, 0x4d6: 0x337a, 0x4d7: 0x34a6, + 0x4d8: 0x3537, 0x4d9: 0x3573, 0x4db: 0x35d1, + 0x4e0: 0x46a1, 0x4e1: 0x4732, 0x4e2: 0x2f83, 0x4e3: 0x328f, + 0x4e4: 0x3878, 0x4e5: 0x3a07, 0x4e6: 0x3871, 0x4e7: 0x3a00, 0x4e8: 0x3886, 0x4e9: 0x3a15, + 0x4ea: 0x387f, 0x4eb: 0x3a0e, 0x4ec: 0x38be, 0x4ed: 0x3a4d, 0x4ee: 0x3894, 0x4ef: 0x3a23, + 0x4f0: 0x388d, 0x4f1: 0x3a1c, 0x4f2: 0x38a2, 0x4f3: 0x3a31, 0x4f4: 0x389b, 0x4f5: 0x3a2a, + 0x4f6: 0x38c5, 0x4f7: 0x3a54, 0x4f8: 0x46b5, 0x4f9: 
0x4746, 0x4fa: 0x3000, 0x4fb: 0x330c, + 0x4fc: 0x2fec, 0x4fd: 0x32f8, 0x4fe: 0x38da, 0x4ff: 0x3a69, + // Block 0x14, offset 0x500 + 0x500: 0x38d3, 0x501: 0x3a62, 0x502: 0x38e8, 0x503: 0x3a77, 0x504: 0x38e1, 0x505: 0x3a70, + 0x506: 0x38fd, 0x507: 0x3a8c, 0x508: 0x3091, 0x509: 0x339d, 0x50a: 0x30a5, 0x50b: 0x33b1, + 0x50c: 0x46e7, 0x50d: 0x4778, 0x50e: 0x3136, 0x50f: 0x3447, 0x510: 0x3920, 0x511: 0x3aaf, + 0x512: 0x3919, 0x513: 0x3aa8, 0x514: 0x392e, 0x515: 0x3abd, 0x516: 0x3927, 0x517: 0x3ab6, + 0x518: 0x3989, 0x519: 0x3b18, 0x51a: 0x396d, 0x51b: 0x3afc, 0x51c: 0x3966, 0x51d: 0x3af5, + 0x51e: 0x397b, 0x51f: 0x3b0a, 0x520: 0x3974, 0x521: 0x3b03, 0x522: 0x3982, 0x523: 0x3b11, + 0x524: 0x31e5, 0x525: 0x34fb, 0x526: 0x31c7, 0x527: 0x34dd, 0x528: 0x39e4, 0x529: 0x3b73, + 0x52a: 0x39dd, 0x52b: 0x3b6c, 0x52c: 0x39f2, 0x52d: 0x3b81, 0x52e: 0x39eb, 0x52f: 0x3b7a, + 0x530: 0x39f9, 0x531: 0x3b88, 0x532: 0x3230, 0x533: 0x354b, 0x534: 0x3258, 0x535: 0x3578, + 0x536: 0x3253, 0x537: 0x356e, 0x538: 0x323f, 0x539: 0x355a, + // Block 0x15, offset 0x540 + 0x540: 0x4804, 0x541: 0x480a, 0x542: 0x491e, 0x543: 0x4936, 0x544: 0x4926, 0x545: 0x493e, + 0x546: 0x492e, 0x547: 0x4946, 0x548: 0x47aa, 0x549: 0x47b0, 0x54a: 0x488e, 0x54b: 0x48a6, + 0x54c: 0x4896, 0x54d: 0x48ae, 0x54e: 0x489e, 0x54f: 0x48b6, 0x550: 0x4816, 0x551: 0x481c, + 0x552: 0x3db8, 0x553: 0x3dc8, 0x554: 0x3dc0, 0x555: 0x3dd0, + 0x558: 0x47b6, 0x559: 0x47bc, 0x55a: 0x3ce8, 0x55b: 0x3cf8, 0x55c: 0x3cf0, 0x55d: 0x3d00, + 0x560: 0x482e, 0x561: 0x4834, 0x562: 0x494e, 0x563: 0x4966, + 0x564: 0x4956, 0x565: 0x496e, 0x566: 0x495e, 0x567: 0x4976, 0x568: 0x47c2, 0x569: 0x47c8, + 0x56a: 0x48be, 0x56b: 0x48d6, 0x56c: 0x48c6, 0x56d: 0x48de, 0x56e: 0x48ce, 0x56f: 0x48e6, + 0x570: 0x4846, 0x571: 0x484c, 0x572: 0x3e18, 0x573: 0x3e30, 0x574: 0x3e20, 0x575: 0x3e38, + 0x576: 0x3e28, 0x577: 0x3e40, 0x578: 0x47ce, 0x579: 0x47d4, 0x57a: 0x3d18, 0x57b: 0x3d30, + 0x57c: 0x3d20, 0x57d: 0x3d38, 0x57e: 0x3d28, 0x57f: 0x3d40, + // Block 0x16, offset 0x580 + 0x580: 0x4852, 0x581: 0x4858, 0x582: 0x3e48, 0x583: 0x3e58, 0x584: 0x3e50, 0x585: 0x3e60, + 0x588: 0x47da, 0x589: 0x47e0, 0x58a: 0x3d48, 0x58b: 0x3d58, + 0x58c: 0x3d50, 0x58d: 0x3d60, 0x590: 0x4864, 0x591: 0x486a, + 0x592: 0x3e80, 0x593: 0x3e98, 0x594: 0x3e88, 0x595: 0x3ea0, 0x596: 0x3e90, 0x597: 0x3ea8, + 0x599: 0x47e6, 0x59b: 0x3d68, 0x59d: 0x3d70, + 0x59f: 0x3d78, 0x5a0: 0x487c, 0x5a1: 0x4882, 0x5a2: 0x497e, 0x5a3: 0x4996, + 0x5a4: 0x4986, 0x5a5: 0x499e, 0x5a6: 0x498e, 0x5a7: 0x49a6, 0x5a8: 0x47ec, 0x5a9: 0x47f2, + 0x5aa: 0x48ee, 0x5ab: 0x4906, 0x5ac: 0x48f6, 0x5ad: 0x490e, 0x5ae: 0x48fe, 0x5af: 0x4916, + 0x5b0: 0x47f8, 0x5b1: 0x431e, 0x5b2: 0x3691, 0x5b3: 0x4324, 0x5b4: 0x4822, 0x5b5: 0x432a, + 0x5b6: 0x36a3, 0x5b7: 0x4330, 0x5b8: 0x36c1, 0x5b9: 0x4336, 0x5ba: 0x36d9, 0x5bb: 0x433c, + 0x5bc: 0x4870, 0x5bd: 0x4342, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3da0, 0x5c1: 0x3da8, 0x5c2: 0x4184, 0x5c3: 0x41a2, 0x5c4: 0x418e, 0x5c5: 0x41ac, + 0x5c6: 0x4198, 0x5c7: 0x41b6, 0x5c8: 0x3cd8, 0x5c9: 0x3ce0, 0x5ca: 0x40d0, 0x5cb: 0x40ee, + 0x5cc: 0x40da, 0x5cd: 0x40f8, 0x5ce: 0x40e4, 0x5cf: 0x4102, 0x5d0: 0x3de8, 0x5d1: 0x3df0, + 0x5d2: 0x41c0, 0x5d3: 0x41de, 0x5d4: 0x41ca, 0x5d5: 0x41e8, 0x5d6: 0x41d4, 0x5d7: 0x41f2, + 0x5d8: 0x3d08, 0x5d9: 0x3d10, 0x5da: 0x410c, 0x5db: 0x412a, 0x5dc: 0x4116, 0x5dd: 0x4134, + 0x5de: 0x4120, 0x5df: 0x413e, 0x5e0: 0x3ec0, 0x5e1: 0x3ec8, 0x5e2: 0x41fc, 0x5e3: 0x421a, + 0x5e4: 0x4206, 0x5e5: 0x4224, 0x5e6: 0x4210, 0x5e7: 0x422e, 0x5e8: 0x3d80, 0x5e9: 0x3d88, + 0x5ea: 0x4148, 0x5eb: 0x4166, 0x5ec: 0x4152, 
0x5ed: 0x4170, 0x5ee: 0x415c, 0x5ef: 0x417a, + 0x5f0: 0x3685, 0x5f1: 0x367f, 0x5f2: 0x3d90, 0x5f3: 0x368b, 0x5f4: 0x3d98, + 0x5f6: 0x4810, 0x5f7: 0x3db0, 0x5f8: 0x35f5, 0x5f9: 0x35ef, 0x5fa: 0x35e3, 0x5fb: 0x42ee, + 0x5fc: 0x35fb, 0x5fd: 0x8100, 0x5fe: 0x01d3, 0x5ff: 0xa100, + // Block 0x18, offset 0x600 + 0x600: 0x8100, 0x601: 0x35a7, 0x602: 0x3dd8, 0x603: 0x369d, 0x604: 0x3de0, + 0x606: 0x483a, 0x607: 0x3df8, 0x608: 0x3601, 0x609: 0x42f4, 0x60a: 0x360d, 0x60b: 0x42fa, + 0x60c: 0x3619, 0x60d: 0x3b8f, 0x60e: 0x3b96, 0x60f: 0x3b9d, 0x610: 0x36b5, 0x611: 0x36af, + 0x612: 0x3e00, 0x613: 0x44e4, 0x616: 0x36bb, 0x617: 0x3e10, + 0x618: 0x3631, 0x619: 0x362b, 0x61a: 0x361f, 0x61b: 0x4300, 0x61d: 0x3ba4, + 0x61e: 0x3bab, 0x61f: 0x3bb2, 0x620: 0x36eb, 0x621: 0x36e5, 0x622: 0x3e68, 0x623: 0x44ec, + 0x624: 0x36cd, 0x625: 0x36d3, 0x626: 0x36f1, 0x627: 0x3e78, 0x628: 0x3661, 0x629: 0x365b, + 0x62a: 0x364f, 0x62b: 0x430c, 0x62c: 0x3649, 0x62d: 0x359b, 0x62e: 0x42e8, 0x62f: 0x0081, + 0x632: 0x3eb0, 0x633: 0x36f7, 0x634: 0x3eb8, + 0x636: 0x4888, 0x637: 0x3ed0, 0x638: 0x363d, 0x639: 0x4306, 0x63a: 0x366d, 0x63b: 0x4318, + 0x63c: 0x3679, 0x63d: 0x4256, 0x63e: 0xa100, + // Block 0x19, offset 0x640 + 0x641: 0x3c06, 0x643: 0xa000, 0x644: 0x3c0d, 0x645: 0xa000, + 0x647: 0x3c14, 0x648: 0xa000, 0x649: 0x3c1b, + 0x64d: 0xa000, + 0x660: 0x2f65, 0x661: 0xa000, 0x662: 0x3c29, + 0x664: 0xa000, 0x665: 0xa000, + 0x66d: 0x3c22, 0x66e: 0x2f60, 0x66f: 0x2f6a, + 0x670: 0x3c30, 0x671: 0x3c37, 0x672: 0xa000, 0x673: 0xa000, 0x674: 0x3c3e, 0x675: 0x3c45, + 0x676: 0xa000, 0x677: 0xa000, 0x678: 0x3c4c, 0x679: 0x3c53, 0x67a: 0xa000, 0x67b: 0xa000, + 0x67c: 0xa000, 0x67d: 0xa000, + // Block 0x1a, offset 0x680 + 0x680: 0x3c5a, 0x681: 0x3c61, 0x682: 0xa000, 0x683: 0xa000, 0x684: 0x3c76, 0x685: 0x3c7d, + 0x686: 0xa000, 0x687: 0xa000, 0x688: 0x3c84, 0x689: 0x3c8b, + 0x691: 0xa000, + 0x692: 0xa000, + 0x6a2: 0xa000, + 0x6a8: 0xa000, 0x6a9: 0xa000, + 0x6ab: 0xa000, 0x6ac: 0x3ca0, 0x6ad: 0x3ca7, 0x6ae: 0x3cae, 0x6af: 0x3cb5, + 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0xa000, 0x6b5: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c6: 0xa000, 0x6cb: 0xa000, + 0x6cc: 0x3f08, 0x6cd: 0xa000, 0x6ce: 0x3f10, 0x6cf: 0xa000, 0x6d0: 0x3f18, 0x6d1: 0xa000, + 0x6d2: 0x3f20, 0x6d3: 0xa000, 0x6d4: 0x3f28, 0x6d5: 0xa000, 0x6d6: 0x3f30, 0x6d7: 0xa000, + 0x6d8: 0x3f38, 0x6d9: 0xa000, 0x6da: 0x3f40, 0x6db: 0xa000, 0x6dc: 0x3f48, 0x6dd: 0xa000, + 0x6de: 0x3f50, 0x6df: 0xa000, 0x6e0: 0x3f58, 0x6e1: 0xa000, 0x6e2: 0x3f60, + 0x6e4: 0xa000, 0x6e5: 0x3f68, 0x6e6: 0xa000, 0x6e7: 0x3f70, 0x6e8: 0xa000, 0x6e9: 0x3f78, + 0x6ef: 0xa000, + 0x6f0: 0x3f80, 0x6f1: 0x3f88, 0x6f2: 0xa000, 0x6f3: 0x3f90, 0x6f4: 0x3f98, 0x6f5: 0xa000, + 0x6f6: 0x3fa0, 0x6f7: 0x3fa8, 0x6f8: 0xa000, 0x6f9: 0x3fb0, 0x6fa: 0x3fb8, 0x6fb: 0xa000, + 0x6fc: 0x3fc0, 0x6fd: 0x3fc8, + // Block 0x1c, offset 0x700 + 0x714: 0x3f00, + 0x719: 0x9903, 0x71a: 0x9903, 0x71b: 0x8100, 0x71c: 0x8100, 0x71d: 0xa000, + 0x71e: 0x3fd0, + 0x726: 0xa000, + 0x72b: 0xa000, 0x72c: 0x3fe0, 0x72d: 0xa000, 0x72e: 0x3fe8, 0x72f: 0xa000, + 0x730: 0x3ff0, 0x731: 0xa000, 0x732: 0x3ff8, 0x733: 0xa000, 0x734: 0x4000, 0x735: 0xa000, + 0x736: 0x4008, 0x737: 0xa000, 0x738: 0x4010, 0x739: 0xa000, 0x73a: 0x4018, 0x73b: 0xa000, + 0x73c: 0x4020, 0x73d: 0xa000, 0x73e: 0x4028, 0x73f: 0xa000, + // Block 0x1d, offset 0x740 + 0x740: 0x4030, 0x741: 0xa000, 0x742: 0x4038, 0x744: 0xa000, 0x745: 0x4040, + 0x746: 0xa000, 0x747: 0x4048, 0x748: 0xa000, 0x749: 0x4050, + 0x74f: 0xa000, 0x750: 0x4058, 0x751: 0x4060, + 0x752: 0xa000, 0x753: 0x4068, 0x754: 
0x4070, 0x755: 0xa000, 0x756: 0x4078, 0x757: 0x4080, + 0x758: 0xa000, 0x759: 0x4088, 0x75a: 0x4090, 0x75b: 0xa000, 0x75c: 0x4098, 0x75d: 0x40a0, + 0x76f: 0xa000, + 0x770: 0xa000, 0x771: 0xa000, 0x772: 0xa000, 0x774: 0x3fd8, + 0x777: 0x40a8, 0x778: 0x40b0, 0x779: 0x40b8, 0x77a: 0x40c0, + 0x77d: 0xa000, 0x77e: 0x40c8, + // Block 0x1e, offset 0x780 + 0x780: 0x1377, 0x781: 0x0cfb, 0x782: 0x13d3, 0x783: 0x139f, 0x784: 0x0e57, 0x785: 0x06eb, + 0x786: 0x08df, 0x787: 0x162b, 0x788: 0x162b, 0x789: 0x0a0b, 0x78a: 0x145f, 0x78b: 0x0943, + 0x78c: 0x0a07, 0x78d: 0x0bef, 0x78e: 0x0fcf, 0x78f: 0x115f, 0x790: 0x1297, 0x791: 0x12d3, + 0x792: 0x1307, 0x793: 0x141b, 0x794: 0x0d73, 0x795: 0x0dff, 0x796: 0x0eab, 0x797: 0x0f43, + 0x798: 0x125f, 0x799: 0x1447, 0x79a: 0x1573, 0x79b: 0x070f, 0x79c: 0x08b3, 0x79d: 0x0d87, + 0x79e: 0x0ecf, 0x79f: 0x1293, 0x7a0: 0x15c3, 0x7a1: 0x0ab3, 0x7a2: 0x0e77, 0x7a3: 0x1283, + 0x7a4: 0x1317, 0x7a5: 0x0c23, 0x7a6: 0x11bb, 0x7a7: 0x12df, 0x7a8: 0x0b1f, 0x7a9: 0x0d0f, + 0x7aa: 0x0e17, 0x7ab: 0x0f1b, 0x7ac: 0x1427, 0x7ad: 0x074f, 0x7ae: 0x07e7, 0x7af: 0x0853, + 0x7b0: 0x0c8b, 0x7b1: 0x0d7f, 0x7b2: 0x0ecb, 0x7b3: 0x0fef, 0x7b4: 0x1177, 0x7b5: 0x128b, + 0x7b6: 0x12a3, 0x7b7: 0x13c7, 0x7b8: 0x14ef, 0x7b9: 0x15a3, 0x7ba: 0x15bf, 0x7bb: 0x102b, + 0x7bc: 0x106b, 0x7bd: 0x1123, 0x7be: 0x1243, 0x7bf: 0x147b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x15cb, 0x7c1: 0x134b, 0x7c2: 0x09c7, 0x7c3: 0x0b3b, 0x7c4: 0x10db, 0x7c5: 0x119b, + 0x7c6: 0x0eff, 0x7c7: 0x1033, 0x7c8: 0x1397, 0x7c9: 0x14e7, 0x7ca: 0x09c3, 0x7cb: 0x0a8f, + 0x7cc: 0x0d77, 0x7cd: 0x0e2b, 0x7ce: 0x0e5f, 0x7cf: 0x1113, 0x7d0: 0x113b, 0x7d1: 0x14a7, + 0x7d2: 0x084f, 0x7d3: 0x11a7, 0x7d4: 0x07f3, 0x7d5: 0x07ef, 0x7d6: 0x1097, 0x7d7: 0x1127, + 0x7d8: 0x125b, 0x7d9: 0x14af, 0x7da: 0x1367, 0x7db: 0x0c27, 0x7dc: 0x0d73, 0x7dd: 0x1357, + 0x7de: 0x06f7, 0x7df: 0x0a63, 0x7e0: 0x0b93, 0x7e1: 0x0f2f, 0x7e2: 0x0faf, 0x7e3: 0x0873, + 0x7e4: 0x103b, 0x7e5: 0x075f, 0x7e6: 0x0b77, 0x7e7: 0x06d7, 0x7e8: 0x0deb, 0x7e9: 0x0ca3, + 0x7ea: 0x110f, 0x7eb: 0x08c7, 0x7ec: 0x09b3, 0x7ed: 0x0ffb, 0x7ee: 0x1263, 0x7ef: 0x133b, + 0x7f0: 0x0db7, 0x7f1: 0x13f7, 0x7f2: 0x0de3, 0x7f3: 0x0c37, 0x7f4: 0x121b, 0x7f5: 0x0c57, + 0x7f6: 0x0fab, 0x7f7: 0x072b, 0x7f8: 0x07a7, 0x7f9: 0x07eb, 0x7fa: 0x0d53, 0x7fb: 0x10fb, + 0x7fc: 0x11f3, 0x7fd: 0x1347, 0x7fe: 0x145b, 0x7ff: 0x085b, + // Block 0x20, offset 0x800 + 0x800: 0x090f, 0x801: 0x0a17, 0x802: 0x0b2f, 0x803: 0x0cbf, 0x804: 0x0e7b, 0x805: 0x103f, + 0x806: 0x1497, 0x807: 0x157b, 0x808: 0x15cf, 0x809: 0x15e7, 0x80a: 0x0837, 0x80b: 0x0cf3, + 0x80c: 0x0da3, 0x80d: 0x13eb, 0x80e: 0x0afb, 0x80f: 0x0bd7, 0x810: 0x0bf3, 0x811: 0x0c83, + 0x812: 0x0e6b, 0x813: 0x0eb7, 0x814: 0x0f67, 0x815: 0x108b, 0x816: 0x112f, 0x817: 0x1193, + 0x818: 0x13db, 0x819: 0x126b, 0x81a: 0x1403, 0x81b: 0x147f, 0x81c: 0x080f, 0x81d: 0x083b, + 0x81e: 0x0923, 0x81f: 0x0ea7, 0x820: 0x12f3, 0x821: 0x133b, 0x822: 0x0b1b, 0x823: 0x0b8b, + 0x824: 0x0c4f, 0x825: 0x0daf, 0x826: 0x10d7, 0x827: 0x0f23, 0x828: 0x073b, 0x829: 0x097f, + 0x82a: 0x0a63, 0x82b: 0x0ac7, 0x82c: 0x0b97, 0x82d: 0x0f3f, 0x82e: 0x0f5b, 0x82f: 0x116b, + 0x830: 0x118b, 0x831: 0x1463, 0x832: 0x14e3, 0x833: 0x14f3, 0x834: 0x152f, 0x835: 0x0753, + 0x836: 0x107f, 0x837: 0x144f, 0x838: 0x14cb, 0x839: 0x0baf, 0x83a: 0x0717, 0x83b: 0x0777, + 0x83c: 0x0a67, 0x83d: 0x0a87, 0x83e: 0x0caf, 0x83f: 0x0d73, + // Block 0x21, offset 0x840 + 0x840: 0x0ec3, 0x841: 0x0fcb, 0x842: 0x1277, 0x843: 0x1417, 0x844: 0x1623, 0x845: 0x0ce3, + 0x846: 0x14a3, 0x847: 0x0833, 0x848: 0x0d2f, 0x849: 0x0d3b, 0x84a: 0x0e0f, 
0x84b: 0x0e47, + 0x84c: 0x0f4b, 0x84d: 0x0fa7, 0x84e: 0x1027, 0x84f: 0x110b, 0x850: 0x153b, 0x851: 0x07af, + 0x852: 0x0c03, 0x853: 0x14b3, 0x854: 0x0767, 0x855: 0x0aab, 0x856: 0x0e2f, 0x857: 0x13df, + 0x858: 0x0b67, 0x859: 0x0bb7, 0x85a: 0x0d43, 0x85b: 0x0f2f, 0x85c: 0x14bb, 0x85d: 0x0817, + 0x85e: 0x08ff, 0x85f: 0x0a97, 0x860: 0x0cd3, 0x861: 0x0d1f, 0x862: 0x0d5f, 0x863: 0x0df3, + 0x864: 0x0f47, 0x865: 0x0fbb, 0x866: 0x1157, 0x867: 0x12f7, 0x868: 0x1303, 0x869: 0x1457, + 0x86a: 0x14d7, 0x86b: 0x0883, 0x86c: 0x0e4b, 0x86d: 0x0903, 0x86e: 0x0ec7, 0x86f: 0x0f6b, + 0x870: 0x1287, 0x871: 0x14bf, 0x872: 0x15ab, 0x873: 0x15d3, 0x874: 0x0d37, 0x875: 0x0e27, + 0x876: 0x11c3, 0x877: 0x10b7, 0x878: 0x10c3, 0x879: 0x10e7, 0x87a: 0x0f17, 0x87b: 0x0e9f, + 0x87c: 0x1363, 0x87d: 0x0733, 0x87e: 0x122b, 0x87f: 0x081b, + // Block 0x22, offset 0x880 + 0x880: 0x080b, 0x881: 0x0b0b, 0x882: 0x0c2b, 0x883: 0x10f3, 0x884: 0x0a53, 0x885: 0x0e03, + 0x886: 0x0cef, 0x887: 0x13e7, 0x888: 0x12e7, 0x889: 0x14ab, 0x88a: 0x1323, 0x88b: 0x0b27, + 0x88c: 0x0787, 0x88d: 0x095b, 0x890: 0x09af, + 0x892: 0x0cdf, 0x895: 0x07f7, 0x896: 0x0f1f, 0x897: 0x0fe3, + 0x898: 0x1047, 0x899: 0x1063, 0x89a: 0x1067, 0x89b: 0x107b, 0x89c: 0x14fb, 0x89d: 0x10eb, + 0x89e: 0x116f, 0x8a0: 0x128f, 0x8a2: 0x1353, + 0x8a5: 0x1407, 0x8a6: 0x1433, + 0x8aa: 0x154f, 0x8ab: 0x1553, 0x8ac: 0x1557, 0x8ad: 0x15bb, 0x8ae: 0x142b, 0x8af: 0x14c7, + 0x8b0: 0x0757, 0x8b1: 0x077b, 0x8b2: 0x078f, 0x8b3: 0x084b, 0x8b4: 0x0857, 0x8b5: 0x0897, + 0x8b6: 0x094b, 0x8b7: 0x0967, 0x8b8: 0x096f, 0x8b9: 0x09ab, 0x8ba: 0x09b7, 0x8bb: 0x0a93, + 0x8bc: 0x0a9b, 0x8bd: 0x0ba3, 0x8be: 0x0bcb, 0x8bf: 0x0bd3, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0beb, 0x8c1: 0x0c97, 0x8c2: 0x0cc7, 0x8c3: 0x0ce7, 0x8c4: 0x0d57, 0x8c5: 0x0e1b, + 0x8c6: 0x0e37, 0x8c7: 0x0e67, 0x8c8: 0x0ebb, 0x8c9: 0x0edb, 0x8ca: 0x0f4f, 0x8cb: 0x102f, + 0x8cc: 0x104b, 0x8cd: 0x1053, 0x8ce: 0x104f, 0x8cf: 0x1057, 0x8d0: 0x105b, 0x8d1: 0x105f, + 0x8d2: 0x1073, 0x8d3: 0x1077, 0x8d4: 0x109b, 0x8d5: 0x10af, 0x8d6: 0x10cb, 0x8d7: 0x112f, + 0x8d8: 0x1137, 0x8d9: 0x113f, 0x8da: 0x1153, 0x8db: 0x117b, 0x8dc: 0x11cb, 0x8dd: 0x11ff, + 0x8de: 0x11ff, 0x8df: 0x1267, 0x8e0: 0x130f, 0x8e1: 0x1327, 0x8e2: 0x135b, 0x8e3: 0x135f, + 0x8e4: 0x13a3, 0x8e5: 0x13a7, 0x8e6: 0x13ff, 0x8e7: 0x1407, 0x8e8: 0x14db, 0x8e9: 0x151f, + 0x8ea: 0x1537, 0x8eb: 0x0b9b, 0x8ec: 0x171e, 0x8ed: 0x11e3, + 0x8f0: 0x06df, 0x8f1: 0x07e3, 0x8f2: 0x07a3, 0x8f3: 0x074b, 0x8f4: 0x078b, 0x8f5: 0x07b7, + 0x8f6: 0x0847, 0x8f7: 0x0863, 0x8f8: 0x094b, 0x8f9: 0x0937, 0x8fa: 0x0947, 0x8fb: 0x0963, + 0x8fc: 0x09af, 0x8fd: 0x09bf, 0x8fe: 0x0a03, 0x8ff: 0x0a0f, + // Block 0x24, offset 0x900 + 0x900: 0x0a2b, 0x901: 0x0a3b, 0x902: 0x0b23, 0x903: 0x0b2b, 0x904: 0x0b5b, 0x905: 0x0b7b, + 0x906: 0x0bab, 0x907: 0x0bc3, 0x908: 0x0bb3, 0x909: 0x0bd3, 0x90a: 0x0bc7, 0x90b: 0x0beb, + 0x90c: 0x0c07, 0x90d: 0x0c5f, 0x90e: 0x0c6b, 0x90f: 0x0c73, 0x910: 0x0c9b, 0x911: 0x0cdf, + 0x912: 0x0d0f, 0x913: 0x0d13, 0x914: 0x0d27, 0x915: 0x0da7, 0x916: 0x0db7, 0x917: 0x0e0f, + 0x918: 0x0e5b, 0x919: 0x0e53, 0x91a: 0x0e67, 0x91b: 0x0e83, 0x91c: 0x0ebb, 0x91d: 0x1013, + 0x91e: 0x0edf, 0x91f: 0x0f13, 0x920: 0x0f1f, 0x921: 0x0f5f, 0x922: 0x0f7b, 0x923: 0x0f9f, + 0x924: 0x0fc3, 0x925: 0x0fc7, 0x926: 0x0fe3, 0x927: 0x0fe7, 0x928: 0x0ff7, 0x929: 0x100b, + 0x92a: 0x1007, 0x92b: 0x1037, 0x92c: 0x10b3, 0x92d: 0x10cb, 0x92e: 0x10e3, 0x92f: 0x111b, + 0x930: 0x112f, 0x931: 0x114b, 0x932: 0x117b, 0x933: 0x122f, 0x934: 0x1257, 0x935: 0x12cb, + 0x936: 0x1313, 0x937: 0x131f, 0x938: 0x1327, 0x939: 0x133f, 0x93a: 
0x1353, 0x93b: 0x1343, + 0x93c: 0x135b, 0x93d: 0x1357, 0x93e: 0x134f, 0x93f: 0x135f, + // Block 0x25, offset 0x940 + 0x940: 0x136b, 0x941: 0x13a7, 0x942: 0x13e3, 0x943: 0x1413, 0x944: 0x144b, 0x945: 0x146b, + 0x946: 0x14b7, 0x947: 0x14db, 0x948: 0x14fb, 0x949: 0x150f, 0x94a: 0x151f, 0x94b: 0x152b, + 0x94c: 0x1537, 0x94d: 0x158b, 0x94e: 0x162b, 0x94f: 0x16b5, 0x950: 0x16b0, 0x951: 0x16e2, + 0x952: 0x0607, 0x953: 0x062f, 0x954: 0x0633, 0x955: 0x1764, 0x956: 0x1791, 0x957: 0x1809, + 0x958: 0x1617, 0x959: 0x1627, + // Block 0x26, offset 0x980 + 0x980: 0x06fb, 0x981: 0x06f3, 0x982: 0x0703, 0x983: 0x1647, 0x984: 0x0747, 0x985: 0x0757, + 0x986: 0x075b, 0x987: 0x0763, 0x988: 0x076b, 0x989: 0x076f, 0x98a: 0x077b, 0x98b: 0x0773, + 0x98c: 0x05b3, 0x98d: 0x165b, 0x98e: 0x078f, 0x98f: 0x0793, 0x990: 0x0797, 0x991: 0x07b3, + 0x992: 0x164c, 0x993: 0x05b7, 0x994: 0x079f, 0x995: 0x07bf, 0x996: 0x1656, 0x997: 0x07cf, + 0x998: 0x07d7, 0x999: 0x0737, 0x99a: 0x07df, 0x99b: 0x07e3, 0x99c: 0x1831, 0x99d: 0x07ff, + 0x99e: 0x0807, 0x99f: 0x05bf, 0x9a0: 0x081f, 0x9a1: 0x0823, 0x9a2: 0x082b, 0x9a3: 0x082f, + 0x9a4: 0x05c3, 0x9a5: 0x0847, 0x9a6: 0x084b, 0x9a7: 0x0857, 0x9a8: 0x0863, 0x9a9: 0x0867, + 0x9aa: 0x086b, 0x9ab: 0x0873, 0x9ac: 0x0893, 0x9ad: 0x0897, 0x9ae: 0x089f, 0x9af: 0x08af, + 0x9b0: 0x08b7, 0x9b1: 0x08bb, 0x9b2: 0x08bb, 0x9b3: 0x08bb, 0x9b4: 0x166a, 0x9b5: 0x0e93, + 0x9b6: 0x08cf, 0x9b7: 0x08d7, 0x9b8: 0x166f, 0x9b9: 0x08e3, 0x9ba: 0x08eb, 0x9bb: 0x08f3, + 0x9bc: 0x091b, 0x9bd: 0x0907, 0x9be: 0x0913, 0x9bf: 0x0917, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x091f, 0x9c1: 0x0927, 0x9c2: 0x092b, 0x9c3: 0x0933, 0x9c4: 0x093b, 0x9c5: 0x093f, + 0x9c6: 0x093f, 0x9c7: 0x0947, 0x9c8: 0x094f, 0x9c9: 0x0953, 0x9ca: 0x095f, 0x9cb: 0x0983, + 0x9cc: 0x0967, 0x9cd: 0x0987, 0x9ce: 0x096b, 0x9cf: 0x0973, 0x9d0: 0x080b, 0x9d1: 0x09cf, + 0x9d2: 0x0997, 0x9d3: 0x099b, 0x9d4: 0x099f, 0x9d5: 0x0993, 0x9d6: 0x09a7, 0x9d7: 0x09a3, + 0x9d8: 0x09bb, 0x9d9: 0x1674, 0x9da: 0x09d7, 0x9db: 0x09db, 0x9dc: 0x09e3, 0x9dd: 0x09ef, + 0x9de: 0x09f7, 0x9df: 0x0a13, 0x9e0: 0x1679, 0x9e1: 0x167e, 0x9e2: 0x0a1f, 0x9e3: 0x0a23, + 0x9e4: 0x0a27, 0x9e5: 0x0a1b, 0x9e6: 0x0a2f, 0x9e7: 0x05c7, 0x9e8: 0x05cb, 0x9e9: 0x0a37, + 0x9ea: 0x0a3f, 0x9eb: 0x0a3f, 0x9ec: 0x1683, 0x9ed: 0x0a5b, 0x9ee: 0x0a5f, 0x9ef: 0x0a63, + 0x9f0: 0x0a6b, 0x9f1: 0x1688, 0x9f2: 0x0a73, 0x9f3: 0x0a77, 0x9f4: 0x0b4f, 0x9f5: 0x0a7f, + 0x9f6: 0x05cf, 0x9f7: 0x0a8b, 0x9f8: 0x0a9b, 0x9f9: 0x0aa7, 0x9fa: 0x0aa3, 0x9fb: 0x1692, + 0x9fc: 0x0aaf, 0x9fd: 0x1697, 0x9fe: 0x0abb, 0x9ff: 0x0ab7, + // Block 0x28, offset 0xa00 + 0xa00: 0x0abf, 0xa01: 0x0acf, 0xa02: 0x0ad3, 0xa03: 0x05d3, 0xa04: 0x0ae3, 0xa05: 0x0aeb, + 0xa06: 0x0aef, 0xa07: 0x0af3, 0xa08: 0x05d7, 0xa09: 0x169c, 0xa0a: 0x05db, 0xa0b: 0x0b0f, + 0xa0c: 0x0b13, 0xa0d: 0x0b17, 0xa0e: 0x0b1f, 0xa0f: 0x1863, 0xa10: 0x0b37, 0xa11: 0x16a6, + 0xa12: 0x16a6, 0xa13: 0x11d7, 0xa14: 0x0b47, 0xa15: 0x0b47, 0xa16: 0x05df, 0xa17: 0x16c9, + 0xa18: 0x179b, 0xa19: 0x0b57, 0xa1a: 0x0b5f, 0xa1b: 0x05e3, 0xa1c: 0x0b73, 0xa1d: 0x0b83, + 0xa1e: 0x0b87, 0xa1f: 0x0b8f, 0xa20: 0x0b9f, 0xa21: 0x05eb, 0xa22: 0x05e7, 0xa23: 0x0ba3, + 0xa24: 0x16ab, 0xa25: 0x0ba7, 0xa26: 0x0bbb, 0xa27: 0x0bbf, 0xa28: 0x0bc3, 0xa29: 0x0bbf, + 0xa2a: 0x0bcf, 0xa2b: 0x0bd3, 0xa2c: 0x0be3, 0xa2d: 0x0bdb, 0xa2e: 0x0bdf, 0xa2f: 0x0be7, + 0xa30: 0x0beb, 0xa31: 0x0bef, 0xa32: 0x0bfb, 0xa33: 0x0bff, 0xa34: 0x0c17, 0xa35: 0x0c1f, + 0xa36: 0x0c2f, 0xa37: 0x0c43, 0xa38: 0x16ba, 0xa39: 0x0c3f, 0xa3a: 0x0c33, 0xa3b: 0x0c4b, + 0xa3c: 0x0c53, 0xa3d: 0x0c67, 0xa3e: 0x16bf, 0xa3f: 0x0c6f, + 
// Block 0x29, offset 0xa40 + 0xa40: 0x0c63, 0xa41: 0x0c5b, 0xa42: 0x05ef, 0xa43: 0x0c77, 0xa44: 0x0c7f, 0xa45: 0x0c87, + 0xa46: 0x0c7b, 0xa47: 0x05f3, 0xa48: 0x0c97, 0xa49: 0x0c9f, 0xa4a: 0x16c4, 0xa4b: 0x0ccb, + 0xa4c: 0x0cff, 0xa4d: 0x0cdb, 0xa4e: 0x05ff, 0xa4f: 0x0ce7, 0xa50: 0x05fb, 0xa51: 0x05f7, + 0xa52: 0x07c3, 0xa53: 0x07c7, 0xa54: 0x0d03, 0xa55: 0x0ceb, 0xa56: 0x11ab, 0xa57: 0x0663, + 0xa58: 0x0d0f, 0xa59: 0x0d13, 0xa5a: 0x0d17, 0xa5b: 0x0d2b, 0xa5c: 0x0d23, 0xa5d: 0x16dd, + 0xa5e: 0x0603, 0xa5f: 0x0d3f, 0xa60: 0x0d33, 0xa61: 0x0d4f, 0xa62: 0x0d57, 0xa63: 0x16e7, + 0xa64: 0x0d5b, 0xa65: 0x0d47, 0xa66: 0x0d63, 0xa67: 0x0607, 0xa68: 0x0d67, 0xa69: 0x0d6b, + 0xa6a: 0x0d6f, 0xa6b: 0x0d7b, 0xa6c: 0x16ec, 0xa6d: 0x0d83, 0xa6e: 0x060b, 0xa6f: 0x0d8f, + 0xa70: 0x16f1, 0xa71: 0x0d93, 0xa72: 0x060f, 0xa73: 0x0d9f, 0xa74: 0x0dab, 0xa75: 0x0db7, + 0xa76: 0x0dbb, 0xa77: 0x16f6, 0xa78: 0x168d, 0xa79: 0x16fb, 0xa7a: 0x0ddb, 0xa7b: 0x1700, + 0xa7c: 0x0de7, 0xa7d: 0x0def, 0xa7e: 0x0ddf, 0xa7f: 0x0dfb, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0e0b, 0xa81: 0x0e1b, 0xa82: 0x0e0f, 0xa83: 0x0e13, 0xa84: 0x0e1f, 0xa85: 0x0e23, + 0xa86: 0x1705, 0xa87: 0x0e07, 0xa88: 0x0e3b, 0xa89: 0x0e3f, 0xa8a: 0x0613, 0xa8b: 0x0e53, + 0xa8c: 0x0e4f, 0xa8d: 0x170a, 0xa8e: 0x0e33, 0xa8f: 0x0e6f, 0xa90: 0x170f, 0xa91: 0x1714, + 0xa92: 0x0e73, 0xa93: 0x0e87, 0xa94: 0x0e83, 0xa95: 0x0e7f, 0xa96: 0x0617, 0xa97: 0x0e8b, + 0xa98: 0x0e9b, 0xa99: 0x0e97, 0xa9a: 0x0ea3, 0xa9b: 0x1651, 0xa9c: 0x0eb3, 0xa9d: 0x1719, + 0xa9e: 0x0ebf, 0xa9f: 0x1723, 0xaa0: 0x0ed3, 0xaa1: 0x0edf, 0xaa2: 0x0ef3, 0xaa3: 0x1728, + 0xaa4: 0x0f07, 0xaa5: 0x0f0b, 0xaa6: 0x172d, 0xaa7: 0x1732, 0xaa8: 0x0f27, 0xaa9: 0x0f37, + 0xaaa: 0x061b, 0xaab: 0x0f3b, 0xaac: 0x061f, 0xaad: 0x061f, 0xaae: 0x0f53, 0xaaf: 0x0f57, + 0xab0: 0x0f5f, 0xab1: 0x0f63, 0xab2: 0x0f6f, 0xab3: 0x0623, 0xab4: 0x0f87, 0xab5: 0x1737, + 0xab6: 0x0fa3, 0xab7: 0x173c, 0xab8: 0x0faf, 0xab9: 0x16a1, 0xaba: 0x0fbf, 0xabb: 0x1741, + 0xabc: 0x1746, 0xabd: 0x174b, 0xabe: 0x0627, 0xabf: 0x062b, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0ff7, 0xac1: 0x1755, 0xac2: 0x1750, 0xac3: 0x175a, 0xac4: 0x175f, 0xac5: 0x0fff, + 0xac6: 0x1003, 0xac7: 0x1003, 0xac8: 0x100b, 0xac9: 0x0633, 0xaca: 0x100f, 0xacb: 0x0637, + 0xacc: 0x063b, 0xacd: 0x1769, 0xace: 0x1023, 0xacf: 0x102b, 0xad0: 0x1037, 0xad1: 0x063f, + 0xad2: 0x176e, 0xad3: 0x105b, 0xad4: 0x1773, 0xad5: 0x1778, 0xad6: 0x107b, 0xad7: 0x1093, + 0xad8: 0x0643, 0xad9: 0x109b, 0xada: 0x109f, 0xadb: 0x10a3, 0xadc: 0x177d, 0xadd: 0x1782, + 0xade: 0x1782, 0xadf: 0x10bb, 0xae0: 0x0647, 0xae1: 0x1787, 0xae2: 0x10cf, 0xae3: 0x10d3, + 0xae4: 0x064b, 0xae5: 0x178c, 0xae6: 0x10ef, 0xae7: 0x064f, 0xae8: 0x10ff, 0xae9: 0x10f7, + 0xaea: 0x1107, 0xaeb: 0x1796, 0xaec: 0x111f, 0xaed: 0x0653, 0xaee: 0x112b, 0xaef: 0x1133, + 0xaf0: 0x1143, 0xaf1: 0x0657, 0xaf2: 0x17a0, 0xaf3: 0x17a5, 0xaf4: 0x065b, 0xaf5: 0x17aa, + 0xaf6: 0x115b, 0xaf7: 0x17af, 0xaf8: 0x1167, 0xaf9: 0x1173, 0xafa: 0x117b, 0xafb: 0x17b4, + 0xafc: 0x17b9, 0xafd: 0x118f, 0xafe: 0x17be, 0xaff: 0x1197, + // Block 0x2c, offset 0xb00 + 0xb00: 0x16ce, 0xb01: 0x065f, 0xb02: 0x11af, 0xb03: 0x11b3, 0xb04: 0x0667, 0xb05: 0x11b7, + 0xb06: 0x0a33, 0xb07: 0x17c3, 0xb08: 0x17c8, 0xb09: 0x16d3, 0xb0a: 0x16d8, 0xb0b: 0x11d7, + 0xb0c: 0x11db, 0xb0d: 0x13f3, 0xb0e: 0x066b, 0xb0f: 0x1207, 0xb10: 0x1203, 0xb11: 0x120b, + 0xb12: 0x083f, 0xb13: 0x120f, 0xb14: 0x1213, 0xb15: 0x1217, 0xb16: 0x121f, 0xb17: 0x17cd, + 0xb18: 0x121b, 0xb19: 0x1223, 0xb1a: 0x1237, 0xb1b: 0x123b, 0xb1c: 0x1227, 0xb1d: 0x123f, + 0xb1e: 0x1253, 0xb1f: 
0x1267, 0xb20: 0x1233, 0xb21: 0x1247, 0xb22: 0x124b, 0xb23: 0x124f, + 0xb24: 0x17d2, 0xb25: 0x17dc, 0xb26: 0x17d7, 0xb27: 0x066f, 0xb28: 0x126f, 0xb29: 0x1273, + 0xb2a: 0x127b, 0xb2b: 0x17f0, 0xb2c: 0x127f, 0xb2d: 0x17e1, 0xb2e: 0x0673, 0xb2f: 0x0677, + 0xb30: 0x17e6, 0xb31: 0x17eb, 0xb32: 0x067b, 0xb33: 0x129f, 0xb34: 0x12a3, 0xb35: 0x12a7, + 0xb36: 0x12ab, 0xb37: 0x12b7, 0xb38: 0x12b3, 0xb39: 0x12bf, 0xb3a: 0x12bb, 0xb3b: 0x12cb, + 0xb3c: 0x12c3, 0xb3d: 0x12c7, 0xb3e: 0x12cf, 0xb3f: 0x067f, + // Block 0x2d, offset 0xb40 + 0xb40: 0x12d7, 0xb41: 0x12db, 0xb42: 0x0683, 0xb43: 0x12eb, 0xb44: 0x12ef, 0xb45: 0x17f5, + 0xb46: 0x12fb, 0xb47: 0x12ff, 0xb48: 0x0687, 0xb49: 0x130b, 0xb4a: 0x05bb, 0xb4b: 0x17fa, + 0xb4c: 0x17ff, 0xb4d: 0x068b, 0xb4e: 0x068f, 0xb4f: 0x1337, 0xb50: 0x134f, 0xb51: 0x136b, + 0xb52: 0x137b, 0xb53: 0x1804, 0xb54: 0x138f, 0xb55: 0x1393, 0xb56: 0x13ab, 0xb57: 0x13b7, + 0xb58: 0x180e, 0xb59: 0x1660, 0xb5a: 0x13c3, 0xb5b: 0x13bf, 0xb5c: 0x13cb, 0xb5d: 0x1665, + 0xb5e: 0x13d7, 0xb5f: 0x13e3, 0xb60: 0x1813, 0xb61: 0x1818, 0xb62: 0x1423, 0xb63: 0x142f, + 0xb64: 0x1437, 0xb65: 0x181d, 0xb66: 0x143b, 0xb67: 0x1467, 0xb68: 0x1473, 0xb69: 0x1477, + 0xb6a: 0x146f, 0xb6b: 0x1483, 0xb6c: 0x1487, 0xb6d: 0x1822, 0xb6e: 0x1493, 0xb6f: 0x0693, + 0xb70: 0x149b, 0xb71: 0x1827, 0xb72: 0x0697, 0xb73: 0x14d3, 0xb74: 0x0ac3, 0xb75: 0x14eb, + 0xb76: 0x182c, 0xb77: 0x1836, 0xb78: 0x069b, 0xb79: 0x069f, 0xb7a: 0x1513, 0xb7b: 0x183b, + 0xb7c: 0x06a3, 0xb7d: 0x1840, 0xb7e: 0x152b, 0xb7f: 0x152b, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1533, 0xb81: 0x1845, 0xb82: 0x154b, 0xb83: 0x06a7, 0xb84: 0x155b, 0xb85: 0x1567, + 0xb86: 0x156f, 0xb87: 0x1577, 0xb88: 0x06ab, 0xb89: 0x184a, 0xb8a: 0x158b, 0xb8b: 0x15a7, + 0xb8c: 0x15b3, 0xb8d: 0x06af, 0xb8e: 0x06b3, 0xb8f: 0x15b7, 0xb90: 0x184f, 0xb91: 0x06b7, + 0xb92: 0x1854, 0xb93: 0x1859, 0xb94: 0x185e, 0xb95: 0x15db, 0xb96: 0x06bb, 0xb97: 0x15ef, + 0xb98: 0x15f7, 0xb99: 0x15fb, 0xb9a: 0x1603, 0xb9b: 0x160b, 0xb9c: 0x1613, 0xb9d: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2d, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2e, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x2f, 0xcb: 0x30, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x31, + 0xd0: 0x09, 0xd1: 0x32, 0xd2: 0x33, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x34, + 0xd8: 0x35, 0xd9: 0x0c, 0xdb: 0x36, 0xdc: 0x37, 0xdd: 0x38, 0xdf: 0x39, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3a, 0x121: 0x3b, 0x123: 0x3c, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0d, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0e, + 0x178: 0x0f, 0x179: 0x10, 0x17a: 0x11, 0x17b: 0x12, 0x17c: 0x13, 0x17d: 0x14, 0x17e: 0x15, 0x17f: 0x16, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x17, 0x18a: 0x18, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x19, 0x1c2: 0x1a, 0x1c3: 0x1b, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 
0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1c, 0x325: 0x1d, 0x326: 0x1e, 0x327: 0x1f, + 0x328: 0x20, 0x329: 0x21, 0x32a: 0x22, 0x32b: 0x23, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + // Block 0xe, offset 0x380 + 0x381: 0xa1, 0x382: 0xa2, 0x384: 0xa3, 0x385: 0x82, 0x387: 0xa4, + 0x388: 0xa5, 0x38b: 0xa6, 0x38c: 0x3f, 0x38d: 0xa7, + 0x391: 0xa8, 0x392: 0xa9, 0x393: 0xaa, 0x396: 0xab, 0x397: 0xac, + 0x398: 0x73, 0x39a: 0xad, 0x39c: 0xae, + 0x3a8: 0xaf, 0x3a9: 0xb0, 0x3aa: 0xb1, + 0x3b0: 0x73, 0x3b5: 0xb2, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb3, 0x3ec: 0xb4, + // Block 0x10, offset 0x400 + 0x432: 0xb5, + // Block 0x11, offset 0x440 + 0x445: 0xb6, 0x446: 0xb7, 0x447: 0xb8, + 0x449: 0xb9, + // Block 0x12, offset 0x480 + 0x480: 0xba, + 0x4a3: 0xbb, 0x4a5: 0xbc, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xbd, + // Block 0x14, offset 0x500 + 0x520: 0x24, 0x521: 0x25, 0x522: 0x26, 0x523: 0x27, 0x524: 0x28, 0x525: 0x29, 0x526: 0x2a, 0x527: 0x2b, + 0x528: 0x2c, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 145 entries, 290 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x62, 0x67, 0x69, 0x7a, 0x82, 0x89, 0x8c, 0x93, 0x97, 0x9b, 0x9d, 0x9f, 0xa8, 0xac, 0xb3, 0xb8, 0xbb, 0xc5, 0xc8, 0xcf, 0xd7, 0xda, 0xdc, 0xde, 0xe0, 0xe5, 0xf6, 0x102, 0x104, 0x10a, 0x10c, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x119, 0x11c, 0x11e, 0x121, 0x124, 0x128, 0x12d, 0x136, 0x138, 0x13b, 0x13d, 0x148, 0x14c, 0x15a, 0x15d, 0x163, 0x169, 0x174, 0x178, 0x17a, 0x17c, 0x17e, 0x180, 0x182, 0x188, 0x18c, 0x18e, 0x190, 0x198, 0x19c, 0x19f, 0x1a1, 0x1a3, 0x1a5, 0x1a8, 0x1aa, 0x1ac, 0x1ae, 0x1b0, 0x1b6, 0x1b9, 0x1bb, 0x1c2, 0x1c8, 0x1ce, 0x1d6, 0x1dc, 0x1e2, 0x1e8, 0x1ec, 0x1fa, 0x203, 0x206, 0x209, 0x20b, 0x20e, 0x210, 0x214, 0x219, 0x21b, 0x21d, 0x222, 0x228, 0x22a, 0x22c, 0x22e, 0x234, 0x237, 0x23a, 0x242, 0x249, 0x24c, 0x24f, 0x251, 0x259, 0x25c, 0x263, 0x266, 0x26c, 0x26e, 0x271, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x28f, 0x299, 0x29b, 0x29d, 0x2a3, 0x2a5, 0x2a8} + +// nfcSparseValues: 682 entries, 2728 bytes +var nfcSparseValues = [682]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, 
lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 
0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + // Block 0xd, offset 0x62 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x67 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x69 + {value: 0x0000, lo: 0x10}, + {value: 0x8132, lo: 0x94, hi: 0xa1}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xa9, hi: 0xa9}, + {value: 0x8132, lo: 0xaa, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xaf}, + {value: 0x8116, lo: 0xb0, hi: 0xb0}, + {value: 0x8117, lo: 0xb1, hi: 0xb1}, + {value: 0x8118, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb5}, + {value: 0x812d, lo: 0xb6, hi: 0xb6}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x812d, lo: 0xb9, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbf}, + // Block 0x10, offset 0x7a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x11, offset 0x82 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x12, offset 0x89 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x8c + {value: 0x0008, lo: 0x06}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + // Block 0x14, offset 0x93 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x97 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x16, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x17, offset 0x9d + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x18, offset 0x9f + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x19, offset 0xa8 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1a, offset 0xac + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + 
{value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1b, offset 0xb3 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xb8 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1d, offset 0xbb + {value: 0x0000, lo: 0x09}, + {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1e, offset 0xc5 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1f, offset 0xc8 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x20, offset 0xcf + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x21, offset 0xd7 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xda + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x23, offset 0xdc + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x24, offset 0xde + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x25, offset 0xe0 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0xe5 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x27, offset 0xf6 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x28, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 
0x29, offset 0x104 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x2a, offset 0x10a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2b, offset 0x10c + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2c, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2d, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2e, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2f, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x30, offset 0x116 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x31, offset 0x119 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x32, offset 0x11c + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x33, offset 0x11e + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x34, offset 0x121 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x35, offset 0x124 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x36, offset 0x128 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x37, offset 0x12d + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x38, offset 0x136 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x39, offset 0x138 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x3a, offset 0x13b + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3b, offset 0x13d + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3c, offset 0x148 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x14c + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 
0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3e, offset 0x15a + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3f, offset 0x15d + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x40, offset 0x163 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x41, offset 0x169 + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x42, offset 0x174 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x43, offset 0x178 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x44, offset 0x17a + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x45, offset 0x17c + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x46, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x47, offset 0x180 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x48, offset 0x182 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x49, offset 0x188 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x4a, offset 0x18c + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4b, offset 0x18e + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4c, offset 0x190 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x198 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4e, offset 0x19c + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4f, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x50, offset 0x1a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x51, offset 0x1a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x52, 
offset 0x1a5 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x53, offset 0x1a8 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x54, offset 0x1aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x55, offset 0x1ac + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x56, offset 0x1ae + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x57, offset 0x1b0 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x58, offset 0x1b6 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x59, offset 0x1b9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x5a, offset 0x1bb + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5b, offset 0x1c2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5c, offset 0x1c8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5d, offset 0x1ce + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5e, offset 0x1d6 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5f, offset 0x1dc + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x60, offset 0x1e2 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x61, offset 0x1e8 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x62, offset 0x1ec + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 
0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x63, offset 0x1fa + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x64, offset 0x203 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x65, offset 0x206 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x66, offset 0x209 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x67, offset 0x20b + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x68, offset 0x20e + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x69, offset 0x210 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x6a, offset 0x214 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6b, offset 0x219 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6c, offset 0x21b + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6d, offset 0x21d + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6e, offset 0x222 + {value: 0x0000, lo: 0x05}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6f, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x70, offset 0x22a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x71, offset 0x22c + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x72, offset 0x22e + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x73, offset 0x234 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x74, offset 0x237 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x75, offset 0x23a + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x76, offset 0x242 + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, 
hi: 0xb4}, + // Block 0x77, offset 0x249 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x78, offset 0x24c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x79, offset 0x24f + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7a, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7b, offset 0x259 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + // Block 0x7c, offset 0x25c + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7d, offset 0x263 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7e, offset 0x266 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x26c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x80, offset 0x26e + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x81, offset 0x271 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x82, offset 0x273 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x83, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x84, offset 0x277 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x85, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x86, offset 0x27c + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x87, offset 0x27e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x88, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x89, offset 0x282 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8a, offset 0x28f + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8b, offset 
0x299
+	{value: 0x0000, lo: 0x01},
+	{value: 0x467a, lo: 0x80, hi: 0x80},
+	// Block 0x8c, offset 0x29b
+	{value: 0x0000, lo: 0x01},
+	{value: 0x8132, lo: 0x82, hi: 0x84},
+	// Block 0x8d, offset 0x29d
+	{value: 0x0000, lo: 0x05},
+	{value: 0x8132, lo: 0x80, hi: 0x86},
+	{value: 0x8132, lo: 0x88, hi: 0x98},
+	{value: 0x8132, lo: 0x9b, hi: 0xa1},
+	{value: 0x8132, lo: 0xa3, hi: 0xa4},
+	{value: 0x8132, lo: 0xa6, hi: 0xaa},
+	// Block 0x8e, offset 0x2a3
+	{value: 0x0000, lo: 0x01},
+	{value: 0x812d, lo: 0x90, hi: 0x96},
+	// Block 0x8f, offset 0x2a5
+	{value: 0x0000, lo: 0x02},
+	{value: 0x8132, lo: 0x84, hi: 0x89},
+	{value: 0x8102, lo: 0x8a, hi: 0x8a},
+	// Block 0x90, offset 0x2a8
+	{value: 0x0000, lo: 0x01},
+	{value: 0x8100, lo: 0x93, hi: 0x93},
+}
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return nfkcValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = nfkcIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return nfkcValues[c0]
+	}
+	i := nfkcIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return nfkcValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = nfkcIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return nfkcValues[c0]
+	}
+	i := nfkcIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// nfkcTrie. Total size: 17104 bytes (16.70 KiB). Checksum: d985061cf5307b35.
+type nfkcTrie struct{}
+
+func newNfkcTrie(i int) *nfkcTrie {
+	return &nfkcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	case n < 91:
+		return uint16(nfkcValues[n<<6+uint32(b)])
+	default:
+		n -= 91
+		return uint16(nfkcSparse.lookup(n, b))
+	}
+}
+
+// nfkcValues: 93 blocks, 5952 entries, 11904 bytes
+// The third block is the zero block.
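The lookup functions above walk a two-level compact trie: nfkcIndex maps each UTF-8 byte to a 64-entry block, and lookupValue then reads either a dense block inside nfkcValues (when n < 91) or a sparse block via nfkcSparse. These tables are internal to the vendored golang.org/x/text/unicode/norm package; a consumer only reaches them through the exported normalization forms. A minimal, illustrative sketch of that public entry point (not part of this diff), assuming the package is importable under its usual path:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// U+FB01 (LATIN SMALL LIGATURE FI) has an NFKC decomposition to "fi";
	// resolving it exercises nfkcIndex/nfkcValues via lookups like
	// lookupString above.
	fmt.Println(norm.NFKC.String("ﬁle"))         // "file"
	fmt.Println(norm.NFKC.IsNormalString("ﬁle")) // false
}

The n<<6+uint32(b) indexing in lookupValue works because every block is exactly 64 entries wide, so the block number selects the base and the trailing byte selects the entry; the generated nfkcValues table that follows holds those dense blocks.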
+var nfkcValues = [5952]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x0069, 0x441: 0x006b, 0x442: 0x006f, 0x443: 0x0083, 0x444: 0x00f5, 0x445: 0x00f8, + 0x446: 0x0413, 0x447: 0x0085, 0x448: 0x0089, 
0x449: 0x008b, 0x44a: 0x0104, 0x44b: 0x0107, + 0x44c: 0x010a, 0x44d: 0x008f, 0x44f: 0x0097, 0x450: 0x009b, 0x451: 0x00e0, + 0x452: 0x009f, 0x453: 0x00fe, 0x454: 0x0417, 0x455: 0x041b, 0x456: 0x00a1, 0x457: 0x00a9, + 0x458: 0x00ab, 0x459: 0x0423, 0x45a: 0x012b, 0x45b: 0x00ad, 0x45c: 0x0427, 0x45d: 0x01be, + 0x45e: 0x01c1, 0x45f: 0x01c4, 0x460: 0x01fa, 0x461: 0x01fd, 0x462: 0x0093, 0x463: 0x00a5, + 0x464: 0x00ab, 0x465: 0x00ad, 0x466: 0x01be, 0x467: 0x01c1, 0x468: 0x01eb, 0x469: 0x01fa, + 0x46a: 0x01fd, + 0x478: 0x020c, + // Block 0x12, offset 0x480 + 0x49b: 0x00fb, 0x49c: 0x0087, 0x49d: 0x0101, + 0x49e: 0x00d4, 0x49f: 0x010a, 0x4a0: 0x008d, 0x4a1: 0x010d, 0x4a2: 0x0110, 0x4a3: 0x0116, + 0x4a4: 0x011c, 0x4a5: 0x011f, 0x4a6: 0x0122, 0x4a7: 0x042b, 0x4a8: 0x016a, 0x4a9: 0x0128, + 0x4aa: 0x042f, 0x4ab: 0x016d, 0x4ac: 0x0131, 0x4ad: 0x012e, 0x4ae: 0x0134, 0x4af: 0x0137, + 0x4b0: 0x013a, 0x4b1: 0x013d, 0x4b2: 0x0140, 0x4b3: 0x014c, 0x4b4: 0x014f, 0x4b5: 0x00ec, + 0x4b6: 0x0152, 0x4b7: 0x0155, 0x4b8: 0x041f, 0x4b9: 0x0158, 0x4ba: 0x015b, 0x4bb: 0x00b5, + 0x4bc: 0x015e, 0x4bd: 0x0161, 0x4be: 0x0164, 0x4bf: 0x01d0, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x8132, 0x4c1: 0x8132, 0x4c2: 0x812d, 0x4c3: 0x8132, 0x4c4: 0x8132, 0x4c5: 0x8132, + 0x4c6: 0x8132, 0x4c7: 0x8132, 0x4c8: 0x8132, 0x4c9: 0x8132, 0x4ca: 0x812d, 0x4cb: 0x8132, + 0x4cc: 0x8132, 0x4cd: 0x8135, 0x4ce: 0x812a, 0x4cf: 0x812d, 0x4d0: 0x8129, 0x4d1: 0x8132, + 0x4d2: 0x8132, 0x4d3: 0x8132, 0x4d4: 0x8132, 0x4d5: 0x8132, 0x4d6: 0x8132, 0x4d7: 0x8132, + 0x4d8: 0x8132, 0x4d9: 0x8132, 0x4da: 0x8132, 0x4db: 0x8132, 0x4dc: 0x8132, 0x4dd: 0x8132, + 0x4de: 0x8132, 0x4df: 0x8132, 0x4e0: 0x8132, 0x4e1: 0x8132, 0x4e2: 0x8132, 0x4e3: 0x8132, + 0x4e4: 0x8132, 0x4e5: 0x8132, 0x4e6: 0x8132, 0x4e7: 0x8132, 0x4e8: 0x8132, 0x4e9: 0x8132, + 0x4ea: 0x8132, 0x4eb: 0x8132, 0x4ec: 0x8132, 0x4ed: 0x8132, 0x4ee: 0x8132, 0x4ef: 0x8132, + 0x4f0: 0x8132, 0x4f1: 0x8132, 0x4f2: 0x8132, 0x4f3: 0x8132, 0x4f4: 0x8132, 0x4f5: 0x8132, + 0x4f6: 0x8133, 0x4f7: 0x8131, 0x4f8: 0x8131, 0x4f9: 0x812d, 0x4fb: 0x8132, + 0x4fc: 0x8134, 0x4fd: 0x812d, 0x4fe: 0x8132, 0x4ff: 0x812d, + // Block 0x14, offset 0x500 + 0x500: 0x2f97, 0x501: 0x32a3, 0x502: 0x2fa1, 0x503: 0x32ad, 0x504: 0x2fa6, 0x505: 0x32b2, + 0x506: 0x2fab, 0x507: 0x32b7, 0x508: 0x38cc, 0x509: 0x3a5b, 0x50a: 0x2fc4, 0x50b: 0x32d0, + 0x50c: 0x2fce, 0x50d: 0x32da, 0x50e: 0x2fdd, 0x50f: 0x32e9, 0x510: 0x2fd3, 0x511: 0x32df, + 0x512: 0x2fd8, 0x513: 0x32e4, 0x514: 0x38ef, 0x515: 0x3a7e, 0x516: 0x38f6, 0x517: 0x3a85, + 0x518: 0x3019, 0x519: 0x3325, 0x51a: 0x301e, 0x51b: 0x332a, 0x51c: 0x3904, 0x51d: 0x3a93, + 0x51e: 0x3023, 0x51f: 0x332f, 0x520: 0x3032, 0x521: 0x333e, 0x522: 0x3050, 0x523: 0x335c, + 0x524: 0x305f, 0x525: 0x336b, 0x526: 0x3055, 0x527: 0x3361, 0x528: 0x3064, 0x529: 0x3370, + 0x52a: 0x3069, 0x52b: 0x3375, 0x52c: 0x30af, 0x52d: 0x33bb, 0x52e: 0x390b, 0x52f: 0x3a9a, + 0x530: 0x30b9, 0x531: 0x33ca, 0x532: 0x30c3, 0x533: 0x33d4, 0x534: 0x30cd, 0x535: 0x33de, + 0x536: 0x46c4, 0x537: 0x4755, 0x538: 0x3912, 0x539: 0x3aa1, 0x53a: 0x30e6, 0x53b: 0x33f7, + 0x53c: 0x30e1, 0x53d: 0x33f2, 0x53e: 0x30eb, 0x53f: 0x33fc, + // Block 0x15, offset 0x540 + 0x540: 0x30f0, 0x541: 0x3401, 0x542: 0x30f5, 0x543: 0x3406, 0x544: 0x3109, 0x545: 0x341a, + 0x546: 0x3113, 0x547: 0x3424, 0x548: 0x3122, 0x549: 0x3433, 0x54a: 0x311d, 0x54b: 0x342e, + 0x54c: 0x3935, 0x54d: 0x3ac4, 0x54e: 0x3943, 0x54f: 0x3ad2, 0x550: 0x394a, 0x551: 0x3ad9, + 0x552: 0x3951, 0x553: 0x3ae0, 0x554: 0x314f, 0x555: 0x3460, 0x556: 0x3154, 0x557: 0x3465, + 0x558: 0x315e, 0x559: 
0x346f, 0x55a: 0x46f1, 0x55b: 0x4782, 0x55c: 0x3997, 0x55d: 0x3b26, + 0x55e: 0x3177, 0x55f: 0x3488, 0x560: 0x3181, 0x561: 0x3492, 0x562: 0x4700, 0x563: 0x4791, + 0x564: 0x399e, 0x565: 0x3b2d, 0x566: 0x39a5, 0x567: 0x3b34, 0x568: 0x39ac, 0x569: 0x3b3b, + 0x56a: 0x3190, 0x56b: 0x34a1, 0x56c: 0x319a, 0x56d: 0x34b0, 0x56e: 0x31ae, 0x56f: 0x34c4, + 0x570: 0x31a9, 0x571: 0x34bf, 0x572: 0x31ea, 0x573: 0x3500, 0x574: 0x31f9, 0x575: 0x350f, + 0x576: 0x31f4, 0x577: 0x350a, 0x578: 0x39b3, 0x579: 0x3b42, 0x57a: 0x39ba, 0x57b: 0x3b49, + 0x57c: 0x31fe, 0x57d: 0x3514, 0x57e: 0x3203, 0x57f: 0x3519, + // Block 0x16, offset 0x580 + 0x580: 0x3208, 0x581: 0x351e, 0x582: 0x320d, 0x583: 0x3523, 0x584: 0x321c, 0x585: 0x3532, + 0x586: 0x3217, 0x587: 0x352d, 0x588: 0x3221, 0x589: 0x353c, 0x58a: 0x3226, 0x58b: 0x3541, + 0x58c: 0x322b, 0x58d: 0x3546, 0x58e: 0x3249, 0x58f: 0x3564, 0x590: 0x3262, 0x591: 0x3582, + 0x592: 0x3271, 0x593: 0x3591, 0x594: 0x3276, 0x595: 0x3596, 0x596: 0x337a, 0x597: 0x34a6, + 0x598: 0x3537, 0x599: 0x3573, 0x59a: 0x1be0, 0x59b: 0x42d7, + 0x5a0: 0x46a1, 0x5a1: 0x4732, 0x5a2: 0x2f83, 0x5a3: 0x328f, + 0x5a4: 0x3878, 0x5a5: 0x3a07, 0x5a6: 0x3871, 0x5a7: 0x3a00, 0x5a8: 0x3886, 0x5a9: 0x3a15, + 0x5aa: 0x387f, 0x5ab: 0x3a0e, 0x5ac: 0x38be, 0x5ad: 0x3a4d, 0x5ae: 0x3894, 0x5af: 0x3a23, + 0x5b0: 0x388d, 0x5b1: 0x3a1c, 0x5b2: 0x38a2, 0x5b3: 0x3a31, 0x5b4: 0x389b, 0x5b5: 0x3a2a, + 0x5b6: 0x38c5, 0x5b7: 0x3a54, 0x5b8: 0x46b5, 0x5b9: 0x4746, 0x5ba: 0x3000, 0x5bb: 0x330c, + 0x5bc: 0x2fec, 0x5bd: 0x32f8, 0x5be: 0x38da, 0x5bf: 0x3a69, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x38d3, 0x5c1: 0x3a62, 0x5c2: 0x38e8, 0x5c3: 0x3a77, 0x5c4: 0x38e1, 0x5c5: 0x3a70, + 0x5c6: 0x38fd, 0x5c7: 0x3a8c, 0x5c8: 0x3091, 0x5c9: 0x339d, 0x5ca: 0x30a5, 0x5cb: 0x33b1, + 0x5cc: 0x46e7, 0x5cd: 0x4778, 0x5ce: 0x3136, 0x5cf: 0x3447, 0x5d0: 0x3920, 0x5d1: 0x3aaf, + 0x5d2: 0x3919, 0x5d3: 0x3aa8, 0x5d4: 0x392e, 0x5d5: 0x3abd, 0x5d6: 0x3927, 0x5d7: 0x3ab6, + 0x5d8: 0x3989, 0x5d9: 0x3b18, 0x5da: 0x396d, 0x5db: 0x3afc, 0x5dc: 0x3966, 0x5dd: 0x3af5, + 0x5de: 0x397b, 0x5df: 0x3b0a, 0x5e0: 0x3974, 0x5e1: 0x3b03, 0x5e2: 0x3982, 0x5e3: 0x3b11, + 0x5e4: 0x31e5, 0x5e5: 0x34fb, 0x5e6: 0x31c7, 0x5e7: 0x34dd, 0x5e8: 0x39e4, 0x5e9: 0x3b73, + 0x5ea: 0x39dd, 0x5eb: 0x3b6c, 0x5ec: 0x39f2, 0x5ed: 0x3b81, 0x5ee: 0x39eb, 0x5ef: 0x3b7a, + 0x5f0: 0x39f9, 0x5f1: 0x3b88, 0x5f2: 0x3230, 0x5f3: 0x354b, 0x5f4: 0x3258, 0x5f5: 0x3578, + 0x5f6: 0x3253, 0x5f7: 0x356e, 0x5f8: 0x323f, 0x5f9: 0x355a, + // Block 0x18, offset 0x600 + 0x600: 0x4804, 0x601: 0x480a, 0x602: 0x491e, 0x603: 0x4936, 0x604: 0x4926, 0x605: 0x493e, + 0x606: 0x492e, 0x607: 0x4946, 0x608: 0x47aa, 0x609: 0x47b0, 0x60a: 0x488e, 0x60b: 0x48a6, + 0x60c: 0x4896, 0x60d: 0x48ae, 0x60e: 0x489e, 0x60f: 0x48b6, 0x610: 0x4816, 0x611: 0x481c, + 0x612: 0x3db8, 0x613: 0x3dc8, 0x614: 0x3dc0, 0x615: 0x3dd0, + 0x618: 0x47b6, 0x619: 0x47bc, 0x61a: 0x3ce8, 0x61b: 0x3cf8, 0x61c: 0x3cf0, 0x61d: 0x3d00, + 0x620: 0x482e, 0x621: 0x4834, 0x622: 0x494e, 0x623: 0x4966, + 0x624: 0x4956, 0x625: 0x496e, 0x626: 0x495e, 0x627: 0x4976, 0x628: 0x47c2, 0x629: 0x47c8, + 0x62a: 0x48be, 0x62b: 0x48d6, 0x62c: 0x48c6, 0x62d: 0x48de, 0x62e: 0x48ce, 0x62f: 0x48e6, + 0x630: 0x4846, 0x631: 0x484c, 0x632: 0x3e18, 0x633: 0x3e30, 0x634: 0x3e20, 0x635: 0x3e38, + 0x636: 0x3e28, 0x637: 0x3e40, 0x638: 0x47ce, 0x639: 0x47d4, 0x63a: 0x3d18, 0x63b: 0x3d30, + 0x63c: 0x3d20, 0x63d: 0x3d38, 0x63e: 0x3d28, 0x63f: 0x3d40, + // Block 0x19, offset 0x640 + 0x640: 0x4852, 0x641: 0x4858, 0x642: 0x3e48, 0x643: 0x3e58, 0x644: 0x3e50, 0x645: 0x3e60, + 0x648: 0x47da, 
0x649: 0x47e0, 0x64a: 0x3d48, 0x64b: 0x3d58, + 0x64c: 0x3d50, 0x64d: 0x3d60, 0x650: 0x4864, 0x651: 0x486a, + 0x652: 0x3e80, 0x653: 0x3e98, 0x654: 0x3e88, 0x655: 0x3ea0, 0x656: 0x3e90, 0x657: 0x3ea8, + 0x659: 0x47e6, 0x65b: 0x3d68, 0x65d: 0x3d70, + 0x65f: 0x3d78, 0x660: 0x487c, 0x661: 0x4882, 0x662: 0x497e, 0x663: 0x4996, + 0x664: 0x4986, 0x665: 0x499e, 0x666: 0x498e, 0x667: 0x49a6, 0x668: 0x47ec, 0x669: 0x47f2, + 0x66a: 0x48ee, 0x66b: 0x4906, 0x66c: 0x48f6, 0x66d: 0x490e, 0x66e: 0x48fe, 0x66f: 0x4916, + 0x670: 0x47f8, 0x671: 0x431e, 0x672: 0x3691, 0x673: 0x4324, 0x674: 0x4822, 0x675: 0x432a, + 0x676: 0x36a3, 0x677: 0x4330, 0x678: 0x36c1, 0x679: 0x4336, 0x67a: 0x36d9, 0x67b: 0x433c, + 0x67c: 0x4870, 0x67d: 0x4342, + // Block 0x1a, offset 0x680 + 0x680: 0x3da0, 0x681: 0x3da8, 0x682: 0x4184, 0x683: 0x41a2, 0x684: 0x418e, 0x685: 0x41ac, + 0x686: 0x4198, 0x687: 0x41b6, 0x688: 0x3cd8, 0x689: 0x3ce0, 0x68a: 0x40d0, 0x68b: 0x40ee, + 0x68c: 0x40da, 0x68d: 0x40f8, 0x68e: 0x40e4, 0x68f: 0x4102, 0x690: 0x3de8, 0x691: 0x3df0, + 0x692: 0x41c0, 0x693: 0x41de, 0x694: 0x41ca, 0x695: 0x41e8, 0x696: 0x41d4, 0x697: 0x41f2, + 0x698: 0x3d08, 0x699: 0x3d10, 0x69a: 0x410c, 0x69b: 0x412a, 0x69c: 0x4116, 0x69d: 0x4134, + 0x69e: 0x4120, 0x69f: 0x413e, 0x6a0: 0x3ec0, 0x6a1: 0x3ec8, 0x6a2: 0x41fc, 0x6a3: 0x421a, + 0x6a4: 0x4206, 0x6a5: 0x4224, 0x6a6: 0x4210, 0x6a7: 0x422e, 0x6a8: 0x3d80, 0x6a9: 0x3d88, + 0x6aa: 0x4148, 0x6ab: 0x4166, 0x6ac: 0x4152, 0x6ad: 0x4170, 0x6ae: 0x415c, 0x6af: 0x417a, + 0x6b0: 0x3685, 0x6b1: 0x367f, 0x6b2: 0x3d90, 0x6b3: 0x368b, 0x6b4: 0x3d98, + 0x6b6: 0x4810, 0x6b7: 0x3db0, 0x6b8: 0x35f5, 0x6b9: 0x35ef, 0x6ba: 0x35e3, 0x6bb: 0x42ee, + 0x6bc: 0x35fb, 0x6bd: 0x4287, 0x6be: 0x01d3, 0x6bf: 0x4287, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x42a0, 0x6c1: 0x4482, 0x6c2: 0x3dd8, 0x6c3: 0x369d, 0x6c4: 0x3de0, + 0x6c6: 0x483a, 0x6c7: 0x3df8, 0x6c8: 0x3601, 0x6c9: 0x42f4, 0x6ca: 0x360d, 0x6cb: 0x42fa, + 0x6cc: 0x3619, 0x6cd: 0x4489, 0x6ce: 0x4490, 0x6cf: 0x4497, 0x6d0: 0x36b5, 0x6d1: 0x36af, + 0x6d2: 0x3e00, 0x6d3: 0x44e4, 0x6d6: 0x36bb, 0x6d7: 0x3e10, + 0x6d8: 0x3631, 0x6d9: 0x362b, 0x6da: 0x361f, 0x6db: 0x4300, 0x6dd: 0x449e, + 0x6de: 0x44a5, 0x6df: 0x44ac, 0x6e0: 0x36eb, 0x6e1: 0x36e5, 0x6e2: 0x3e68, 0x6e3: 0x44ec, + 0x6e4: 0x36cd, 0x6e5: 0x36d3, 0x6e6: 0x36f1, 0x6e7: 0x3e78, 0x6e8: 0x3661, 0x6e9: 0x365b, + 0x6ea: 0x364f, 0x6eb: 0x430c, 0x6ec: 0x3649, 0x6ed: 0x4474, 0x6ee: 0x447b, 0x6ef: 0x0081, + 0x6f2: 0x3eb0, 0x6f3: 0x36f7, 0x6f4: 0x3eb8, + 0x6f6: 0x4888, 0x6f7: 0x3ed0, 0x6f8: 0x363d, 0x6f9: 0x4306, 0x6fa: 0x366d, 0x6fb: 0x4318, + 0x6fc: 0x3679, 0x6fd: 0x425a, 0x6fe: 0x428c, + // Block 0x1c, offset 0x700 + 0x700: 0x1bd8, 0x701: 0x1bdc, 0x702: 0x0047, 0x703: 0x1c54, 0x705: 0x1be8, + 0x706: 0x1bec, 0x707: 0x00e9, 0x709: 0x1c58, 0x70a: 0x008f, 0x70b: 0x0051, + 0x70c: 0x0051, 0x70d: 0x0051, 0x70e: 0x0091, 0x70f: 0x00da, 0x710: 0x0053, 0x711: 0x0053, + 0x712: 0x0059, 0x713: 0x0099, 0x715: 0x005d, 0x716: 0x198d, + 0x719: 0x0061, 0x71a: 0x0063, 0x71b: 0x0065, 0x71c: 0x0065, 0x71d: 0x0065, + 0x720: 0x199f, 0x721: 0x1bc8, 0x722: 0x19a8, + 0x724: 0x0075, 0x726: 0x01b8, 0x728: 0x0075, + 0x72a: 0x0057, 0x72b: 0x42d2, 0x72c: 0x0045, 0x72d: 0x0047, 0x72f: 0x008b, + 0x730: 0x004b, 0x731: 0x004d, 0x733: 0x005b, 0x734: 0x009f, 0x735: 0x0215, + 0x736: 0x0218, 0x737: 0x021b, 0x738: 0x021e, 0x739: 0x0093, 0x73b: 0x1b98, + 0x73c: 0x01e8, 0x73d: 0x01c1, 0x73e: 0x0179, 0x73f: 0x01a0, + // Block 0x1d, offset 0x740 + 0x740: 0x0463, 0x745: 0x0049, + 0x746: 0x0089, 0x747: 0x008b, 0x748: 0x0093, 0x749: 0x0095, + 0x750: 0x222e, 
0x751: 0x223a, + 0x752: 0x22ee, 0x753: 0x2216, 0x754: 0x229a, 0x755: 0x2222, 0x756: 0x22a0, 0x757: 0x22b8, + 0x758: 0x22c4, 0x759: 0x2228, 0x75a: 0x22ca, 0x75b: 0x2234, 0x75c: 0x22be, 0x75d: 0x22d0, + 0x75e: 0x22d6, 0x75f: 0x1cbc, 0x760: 0x0053, 0x761: 0x195a, 0x762: 0x1ba4, 0x763: 0x1963, + 0x764: 0x006d, 0x765: 0x19ab, 0x766: 0x1bd0, 0x767: 0x1d48, 0x768: 0x1966, 0x769: 0x0071, + 0x76a: 0x19b7, 0x76b: 0x1bd4, 0x76c: 0x0059, 0x76d: 0x0047, 0x76e: 0x0049, 0x76f: 0x005b, + 0x770: 0x0093, 0x771: 0x19e4, 0x772: 0x1c18, 0x773: 0x19ed, 0x774: 0x00ad, 0x775: 0x1a62, + 0x776: 0x1c4c, 0x777: 0x1d5c, 0x778: 0x19f0, 0x779: 0x00b1, 0x77a: 0x1a65, 0x77b: 0x1c50, + 0x77c: 0x0099, 0x77d: 0x0087, 0x77e: 0x0089, 0x77f: 0x009b, + // Block 0x1e, offset 0x780 + 0x781: 0x3c06, 0x783: 0xa000, 0x784: 0x3c0d, 0x785: 0xa000, + 0x787: 0x3c14, 0x788: 0xa000, 0x789: 0x3c1b, + 0x78d: 0xa000, + 0x7a0: 0x2f65, 0x7a1: 0xa000, 0x7a2: 0x3c29, + 0x7a4: 0xa000, 0x7a5: 0xa000, + 0x7ad: 0x3c22, 0x7ae: 0x2f60, 0x7af: 0x2f6a, + 0x7b0: 0x3c30, 0x7b1: 0x3c37, 0x7b2: 0xa000, 0x7b3: 0xa000, 0x7b4: 0x3c3e, 0x7b5: 0x3c45, + 0x7b6: 0xa000, 0x7b7: 0xa000, 0x7b8: 0x3c4c, 0x7b9: 0x3c53, 0x7ba: 0xa000, 0x7bb: 0xa000, + 0x7bc: 0xa000, 0x7bd: 0xa000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3c5a, 0x7c1: 0x3c61, 0x7c2: 0xa000, 0x7c3: 0xa000, 0x7c4: 0x3c76, 0x7c5: 0x3c7d, + 0x7c6: 0xa000, 0x7c7: 0xa000, 0x7c8: 0x3c84, 0x7c9: 0x3c8b, + 0x7d1: 0xa000, + 0x7d2: 0xa000, + 0x7e2: 0xa000, + 0x7e8: 0xa000, 0x7e9: 0xa000, + 0x7eb: 0xa000, 0x7ec: 0x3ca0, 0x7ed: 0x3ca7, 0x7ee: 0x3cae, 0x7ef: 0x3cb5, + 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0xa000, 0x7f5: 0xa000, + // Block 0x20, offset 0x800 + 0x820: 0x0023, 0x821: 0x0025, 0x822: 0x0027, 0x823: 0x0029, + 0x824: 0x002b, 0x825: 0x002d, 0x826: 0x002f, 0x827: 0x0031, 0x828: 0x0033, 0x829: 0x1882, + 0x82a: 0x1885, 0x82b: 0x1888, 0x82c: 0x188b, 0x82d: 0x188e, 0x82e: 0x1891, 0x82f: 0x1894, + 0x830: 0x1897, 0x831: 0x189a, 0x832: 0x189d, 0x833: 0x18a6, 0x834: 0x1a68, 0x835: 0x1a6c, + 0x836: 0x1a70, 0x837: 0x1a74, 0x838: 0x1a78, 0x839: 0x1a7c, 0x83a: 0x1a80, 0x83b: 0x1a84, + 0x83c: 0x1a88, 0x83d: 0x1c80, 0x83e: 0x1c85, 0x83f: 0x1c8a, + // Block 0x21, offset 0x840 + 0x840: 0x1c8f, 0x841: 0x1c94, 0x842: 0x1c99, 0x843: 0x1c9e, 0x844: 0x1ca3, 0x845: 0x1ca8, + 0x846: 0x1cad, 0x847: 0x1cb2, 0x848: 0x187f, 0x849: 0x18a3, 0x84a: 0x18c7, 0x84b: 0x18eb, + 0x84c: 0x190f, 0x84d: 0x1918, 0x84e: 0x191e, 0x84f: 0x1924, 0x850: 0x192a, 0x851: 0x1b60, + 0x852: 0x1b64, 0x853: 0x1b68, 0x854: 0x1b6c, 0x855: 0x1b70, 0x856: 0x1b74, 0x857: 0x1b78, + 0x858: 0x1b7c, 0x859: 0x1b80, 0x85a: 0x1b84, 0x85b: 0x1b88, 0x85c: 0x1af4, 0x85d: 0x1af8, + 0x85e: 0x1afc, 0x85f: 0x1b00, 0x860: 0x1b04, 0x861: 0x1b08, 0x862: 0x1b0c, 0x863: 0x1b10, + 0x864: 0x1b14, 0x865: 0x1b18, 0x866: 0x1b1c, 0x867: 0x1b20, 0x868: 0x1b24, 0x869: 0x1b28, + 0x86a: 0x1b2c, 0x86b: 0x1b30, 0x86c: 0x1b34, 0x86d: 0x1b38, 0x86e: 0x1b3c, 0x86f: 0x1b40, + 0x870: 0x1b44, 0x871: 0x1b48, 0x872: 0x1b4c, 0x873: 0x1b50, 0x874: 0x1b54, 0x875: 0x1b58, + 0x876: 0x0043, 0x877: 0x0045, 0x878: 0x0047, 0x879: 0x0049, 0x87a: 0x004b, 0x87b: 0x004d, + 0x87c: 0x004f, 0x87d: 0x0051, 0x87e: 0x0053, 0x87f: 0x0055, + // Block 0x22, offset 0x880 + 0x880: 0x06bf, 0x881: 0x06e3, 0x882: 0x06ef, 0x883: 0x06ff, 0x884: 0x0707, 0x885: 0x0713, + 0x886: 0x071b, 0x887: 0x0723, 0x888: 0x072f, 0x889: 0x0783, 0x88a: 0x079b, 0x88b: 0x07ab, + 0x88c: 0x07bb, 0x88d: 0x07cb, 0x88e: 0x07db, 0x88f: 0x07fb, 0x890: 0x07ff, 0x891: 0x0803, + 0x892: 0x0837, 0x893: 0x085f, 0x894: 0x086f, 0x895: 0x0877, 0x896: 0x087b, 0x897: 
0x0887, + 0x898: 0x08a3, 0x899: 0x08a7, 0x89a: 0x08bf, 0x89b: 0x08c3, 0x89c: 0x08cb, 0x89d: 0x08db, + 0x89e: 0x0977, 0x89f: 0x098b, 0x8a0: 0x09cb, 0x8a1: 0x09df, 0x8a2: 0x09e7, 0x8a3: 0x09eb, + 0x8a4: 0x09fb, 0x8a5: 0x0a17, 0x8a6: 0x0a43, 0x8a7: 0x0a4f, 0x8a8: 0x0a6f, 0x8a9: 0x0a7b, + 0x8aa: 0x0a7f, 0x8ab: 0x0a83, 0x8ac: 0x0a9b, 0x8ad: 0x0a9f, 0x8ae: 0x0acb, 0x8af: 0x0ad7, + 0x8b0: 0x0adf, 0x8b1: 0x0ae7, 0x8b2: 0x0af7, 0x8b3: 0x0aff, 0x8b4: 0x0b07, 0x8b5: 0x0b33, + 0x8b6: 0x0b37, 0x8b7: 0x0b3f, 0x8b8: 0x0b43, 0x8b9: 0x0b4b, 0x8ba: 0x0b53, 0x8bb: 0x0b63, + 0x8bc: 0x0b7f, 0x8bd: 0x0bf7, 0x8be: 0x0c0b, 0x8bf: 0x0c0f, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0c8f, 0x8c1: 0x0c93, 0x8c2: 0x0ca7, 0x8c3: 0x0cab, 0x8c4: 0x0cb3, 0x8c5: 0x0cbb, + 0x8c6: 0x0cc3, 0x8c7: 0x0ccf, 0x8c8: 0x0cf7, 0x8c9: 0x0d07, 0x8ca: 0x0d1b, 0x8cb: 0x0d8b, + 0x8cc: 0x0d97, 0x8cd: 0x0da7, 0x8ce: 0x0db3, 0x8cf: 0x0dbf, 0x8d0: 0x0dc7, 0x8d1: 0x0dcb, + 0x8d2: 0x0dcf, 0x8d3: 0x0dd3, 0x8d4: 0x0dd7, 0x8d5: 0x0e8f, 0x8d6: 0x0ed7, 0x8d7: 0x0ee3, + 0x8d8: 0x0ee7, 0x8d9: 0x0eeb, 0x8da: 0x0eef, 0x8db: 0x0ef7, 0x8dc: 0x0efb, 0x8dd: 0x0f0f, + 0x8de: 0x0f2b, 0x8df: 0x0f33, 0x8e0: 0x0f73, 0x8e1: 0x0f77, 0x8e2: 0x0f7f, 0x8e3: 0x0f83, + 0x8e4: 0x0f8b, 0x8e5: 0x0f8f, 0x8e6: 0x0fb3, 0x8e7: 0x0fb7, 0x8e8: 0x0fd3, 0x8e9: 0x0fd7, + 0x8ea: 0x0fdb, 0x8eb: 0x0fdf, 0x8ec: 0x0ff3, 0x8ed: 0x1017, 0x8ee: 0x101b, 0x8ef: 0x101f, + 0x8f0: 0x1043, 0x8f1: 0x1083, 0x8f2: 0x1087, 0x8f3: 0x10a7, 0x8f4: 0x10b7, 0x8f5: 0x10bf, + 0x8f6: 0x10df, 0x8f7: 0x1103, 0x8f8: 0x1147, 0x8f9: 0x114f, 0x8fa: 0x1163, 0x8fb: 0x116f, + 0x8fc: 0x1177, 0x8fd: 0x117f, 0x8fe: 0x1183, 0x8ff: 0x1187, + // Block 0x24, offset 0x900 + 0x900: 0x119f, 0x901: 0x11a3, 0x902: 0x11bf, 0x903: 0x11c7, 0x904: 0x11cf, 0x905: 0x11d3, + 0x906: 0x11df, 0x907: 0x11e7, 0x908: 0x11eb, 0x909: 0x11ef, 0x90a: 0x11f7, 0x90b: 0x11fb, + 0x90c: 0x129b, 0x90d: 0x12af, 0x90e: 0x12e3, 0x90f: 0x12e7, 0x910: 0x12ef, 0x911: 0x131b, + 0x912: 0x1323, 0x913: 0x132b, 0x914: 0x1333, 0x915: 0x136f, 0x916: 0x1373, 0x917: 0x137b, + 0x918: 0x137f, 0x919: 0x1383, 0x91a: 0x13af, 0x91b: 0x13b3, 0x91c: 0x13bb, 0x91d: 0x13cf, + 0x91e: 0x13d3, 0x91f: 0x13ef, 0x920: 0x13f7, 0x921: 0x13fb, 0x922: 0x141f, 0x923: 0x143f, + 0x924: 0x1453, 0x925: 0x1457, 0x926: 0x145f, 0x927: 0x148b, 0x928: 0x148f, 0x929: 0x149f, + 0x92a: 0x14c3, 0x92b: 0x14cf, 0x92c: 0x14df, 0x92d: 0x14f7, 0x92e: 0x14ff, 0x92f: 0x1503, + 0x930: 0x1507, 0x931: 0x150b, 0x932: 0x1517, 0x933: 0x151b, 0x934: 0x1523, 0x935: 0x153f, + 0x936: 0x1543, 0x937: 0x1547, 0x938: 0x155f, 0x939: 0x1563, 0x93a: 0x156b, 0x93b: 0x157f, + 0x93c: 0x1583, 0x93d: 0x1587, 0x93e: 0x158f, 0x93f: 0x1593, + // Block 0x25, offset 0x940 + 0x946: 0xa000, 0x94b: 0xa000, + 0x94c: 0x3f08, 0x94d: 0xa000, 0x94e: 0x3f10, 0x94f: 0xa000, 0x950: 0x3f18, 0x951: 0xa000, + 0x952: 0x3f20, 0x953: 0xa000, 0x954: 0x3f28, 0x955: 0xa000, 0x956: 0x3f30, 0x957: 0xa000, + 0x958: 0x3f38, 0x959: 0xa000, 0x95a: 0x3f40, 0x95b: 0xa000, 0x95c: 0x3f48, 0x95d: 0xa000, + 0x95e: 0x3f50, 0x95f: 0xa000, 0x960: 0x3f58, 0x961: 0xa000, 0x962: 0x3f60, + 0x964: 0xa000, 0x965: 0x3f68, 0x966: 0xa000, 0x967: 0x3f70, 0x968: 0xa000, 0x969: 0x3f78, + 0x96f: 0xa000, + 0x970: 0x3f80, 0x971: 0x3f88, 0x972: 0xa000, 0x973: 0x3f90, 0x974: 0x3f98, 0x975: 0xa000, + 0x976: 0x3fa0, 0x977: 0x3fa8, 0x978: 0xa000, 0x979: 0x3fb0, 0x97a: 0x3fb8, 0x97b: 0xa000, + 0x97c: 0x3fc0, 0x97d: 0x3fc8, + // Block 0x26, offset 0x980 + 0x994: 0x3f00, + 0x999: 0x9903, 0x99a: 0x9903, 0x99b: 0x42dc, 0x99c: 0x42e2, 0x99d: 0xa000, + 0x99e: 0x3fd0, 0x99f: 0x26b4, + 0x9a6: 
0xa000, + 0x9ab: 0xa000, 0x9ac: 0x3fe0, 0x9ad: 0xa000, 0x9ae: 0x3fe8, 0x9af: 0xa000, + 0x9b0: 0x3ff0, 0x9b1: 0xa000, 0x9b2: 0x3ff8, 0x9b3: 0xa000, 0x9b4: 0x4000, 0x9b5: 0xa000, + 0x9b6: 0x4008, 0x9b7: 0xa000, 0x9b8: 0x4010, 0x9b9: 0xa000, 0x9ba: 0x4018, 0x9bb: 0xa000, + 0x9bc: 0x4020, 0x9bd: 0xa000, 0x9be: 0x4028, 0x9bf: 0xa000, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x4030, 0x9c1: 0xa000, 0x9c2: 0x4038, 0x9c4: 0xa000, 0x9c5: 0x4040, + 0x9c6: 0xa000, 0x9c7: 0x4048, 0x9c8: 0xa000, 0x9c9: 0x4050, + 0x9cf: 0xa000, 0x9d0: 0x4058, 0x9d1: 0x4060, + 0x9d2: 0xa000, 0x9d3: 0x4068, 0x9d4: 0x4070, 0x9d5: 0xa000, 0x9d6: 0x4078, 0x9d7: 0x4080, + 0x9d8: 0xa000, 0x9d9: 0x4088, 0x9da: 0x4090, 0x9db: 0xa000, 0x9dc: 0x4098, 0x9dd: 0x40a0, + 0x9ef: 0xa000, + 0x9f0: 0xa000, 0x9f1: 0xa000, 0x9f2: 0xa000, 0x9f4: 0x3fd8, + 0x9f7: 0x40a8, 0x9f8: 0x40b0, 0x9f9: 0x40b8, 0x9fa: 0x40c0, + 0x9fd: 0xa000, 0x9fe: 0x40c8, 0x9ff: 0x26c9, + // Block 0x28, offset 0xa00 + 0xa00: 0x0367, 0xa01: 0x032b, 0xa02: 0x032f, 0xa03: 0x0333, 0xa04: 0x037b, 0xa05: 0x0337, + 0xa06: 0x033b, 0xa07: 0x033f, 0xa08: 0x0343, 0xa09: 0x0347, 0xa0a: 0x034b, 0xa0b: 0x034f, + 0xa0c: 0x0353, 0xa0d: 0x0357, 0xa0e: 0x035b, 0xa0f: 0x49bd, 0xa10: 0x49c3, 0xa11: 0x49c9, + 0xa12: 0x49cf, 0xa13: 0x49d5, 0xa14: 0x49db, 0xa15: 0x49e1, 0xa16: 0x49e7, 0xa17: 0x49ed, + 0xa18: 0x49f3, 0xa19: 0x49f9, 0xa1a: 0x49ff, 0xa1b: 0x4a05, 0xa1c: 0x4a0b, 0xa1d: 0x4a11, + 0xa1e: 0x4a17, 0xa1f: 0x4a1d, 0xa20: 0x4a23, 0xa21: 0x4a29, 0xa22: 0x4a2f, 0xa23: 0x4a35, + 0xa24: 0x03c3, 0xa25: 0x035f, 0xa26: 0x0363, 0xa27: 0x03e7, 0xa28: 0x03eb, 0xa29: 0x03ef, + 0xa2a: 0x03f3, 0xa2b: 0x03f7, 0xa2c: 0x03fb, 0xa2d: 0x03ff, 0xa2e: 0x036b, 0xa2f: 0x0403, + 0xa30: 0x0407, 0xa31: 0x036f, 0xa32: 0x0373, 0xa33: 0x0377, 0xa34: 0x037f, 0xa35: 0x0383, + 0xa36: 0x0387, 0xa37: 0x038b, 0xa38: 0x038f, 0xa39: 0x0393, 0xa3a: 0x0397, 0xa3b: 0x039b, + 0xa3c: 0x039f, 0xa3d: 0x03a3, 0xa3e: 0x03a7, 0xa3f: 0x03ab, + // Block 0x29, offset 0xa40 + 0xa40: 0x03af, 0xa41: 0x03b3, 0xa42: 0x040b, 0xa43: 0x040f, 0xa44: 0x03b7, 0xa45: 0x03bb, + 0xa46: 0x03bf, 0xa47: 0x03c7, 0xa48: 0x03cb, 0xa49: 0x03cf, 0xa4a: 0x03d3, 0xa4b: 0x03d7, + 0xa4c: 0x03db, 0xa4d: 0x03df, 0xa4e: 0x03e3, + 0xa52: 0x06bf, 0xa53: 0x071b, 0xa54: 0x06cb, 0xa55: 0x097b, 0xa56: 0x06cf, 0xa57: 0x06e7, + 0xa58: 0x06d3, 0xa59: 0x0f93, 0xa5a: 0x0707, 0xa5b: 0x06db, 0xa5c: 0x06c3, 0xa5d: 0x09ff, + 0xa5e: 0x098f, 0xa5f: 0x072f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x2054, 0xa81: 0x205a, 0xa82: 0x2060, 0xa83: 0x2066, 0xa84: 0x206c, 0xa85: 0x2072, + 0xa86: 0x2078, 0xa87: 0x207e, 0xa88: 0x2084, 0xa89: 0x208a, 0xa8a: 0x2090, 0xa8b: 0x2096, + 0xa8c: 0x209c, 0xa8d: 0x20a2, 0xa8e: 0x2726, 0xa8f: 0x272f, 0xa90: 0x2738, 0xa91: 0x2741, + 0xa92: 0x274a, 0xa93: 0x2753, 0xa94: 0x275c, 0xa95: 0x2765, 0xa96: 0x276e, 0xa97: 0x2780, + 0xa98: 0x2789, 0xa99: 0x2792, 0xa9a: 0x279b, 0xa9b: 0x27a4, 0xa9c: 0x2777, 0xa9d: 0x2bac, + 0xa9e: 0x2aed, 0xaa0: 0x20a8, 0xaa1: 0x20c0, 0xaa2: 0x20b4, 0xaa3: 0x2108, + 0xaa4: 0x20c6, 0xaa5: 0x20e4, 0xaa6: 0x20ae, 0xaa7: 0x20de, 0xaa8: 0x20ba, 0xaa9: 0x20f0, + 0xaaa: 0x2120, 0xaab: 0x213e, 0xaac: 0x2138, 0xaad: 0x212c, 0xaae: 0x217a, 0xaaf: 0x210e, + 0xab0: 0x211a, 0xab1: 0x2132, 0xab2: 0x2126, 0xab3: 0x2150, 0xab4: 0x20fc, 0xab5: 0x2144, + 0xab6: 0x216e, 0xab7: 0x2156, 0xab8: 0x20ea, 0xab9: 0x20cc, 0xaba: 0x2102, 0xabb: 0x2114, + 0xabc: 0x214a, 0xabd: 0x20d2, 0xabe: 0x2174, 0xabf: 0x20f6, + // Block 0x2b, offset 0xac0 + 0xac0: 0x215c, 0xac1: 0x20d8, 0xac2: 0x2162, 0xac3: 0x2168, 0xac4: 0x092f, 0xac5: 0x0b03, + 0xac6: 0x0ca7, 0xac7: 
0x10c7, + 0xad0: 0x1bc4, 0xad1: 0x18a9, + 0xad2: 0x18ac, 0xad3: 0x18af, 0xad4: 0x18b2, 0xad5: 0x18b5, 0xad6: 0x18b8, 0xad7: 0x18bb, + 0xad8: 0x18be, 0xad9: 0x18c1, 0xada: 0x18ca, 0xadb: 0x18cd, 0xadc: 0x18d0, 0xadd: 0x18d3, + 0xade: 0x18d6, 0xadf: 0x18d9, 0xae0: 0x0313, 0xae1: 0x031b, 0xae2: 0x031f, 0xae3: 0x0327, + 0xae4: 0x032b, 0xae5: 0x032f, 0xae6: 0x0337, 0xae7: 0x033f, 0xae8: 0x0343, 0xae9: 0x034b, + 0xaea: 0x034f, 0xaeb: 0x0353, 0xaec: 0x0357, 0xaed: 0x035b, 0xaee: 0x2e18, 0xaef: 0x2e20, + 0xaf0: 0x2e28, 0xaf1: 0x2e30, 0xaf2: 0x2e38, 0xaf3: 0x2e40, 0xaf4: 0x2e48, 0xaf5: 0x2e50, + 0xaf6: 0x2e60, 0xaf7: 0x2e68, 0xaf8: 0x2e70, 0xaf9: 0x2e78, 0xafa: 0x2e80, 0xafb: 0x2e88, + 0xafc: 0x2ed3, 0xafd: 0x2e9b, 0xafe: 0x2e58, + // Block 0x2c, offset 0xb00 + 0xb00: 0x06bf, 0xb01: 0x071b, 0xb02: 0x06cb, 0xb03: 0x097b, 0xb04: 0x071f, 0xb05: 0x07af, + 0xb06: 0x06c7, 0xb07: 0x07ab, 0xb08: 0x070b, 0xb09: 0x0887, 0xb0a: 0x0d07, 0xb0b: 0x0e8f, + 0xb0c: 0x0dd7, 0xb0d: 0x0d1b, 0xb0e: 0x145f, 0xb0f: 0x098b, 0xb10: 0x0ccf, 0xb11: 0x0d4b, + 0xb12: 0x0d0b, 0xb13: 0x104b, 0xb14: 0x08fb, 0xb15: 0x0f03, 0xb16: 0x1387, 0xb17: 0x105f, + 0xb18: 0x0843, 0xb19: 0x108f, 0xb1a: 0x0f9b, 0xb1b: 0x0a17, 0xb1c: 0x140f, 0xb1d: 0x077f, + 0xb1e: 0x08ab, 0xb1f: 0x0df7, 0xb20: 0x1527, 0xb21: 0x0743, 0xb22: 0x07d3, 0xb23: 0x0d9b, + 0xb24: 0x06cf, 0xb25: 0x06e7, 0xb26: 0x06d3, 0xb27: 0x0adb, 0xb28: 0x08ef, 0xb29: 0x087f, + 0xb2a: 0x0a57, 0xb2b: 0x0a4b, 0xb2c: 0x0feb, 0xb2d: 0x073f, 0xb2e: 0x139b, 0xb2f: 0x089b, + 0xb30: 0x09f3, 0xb31: 0x18dc, 0xb32: 0x18df, 0xb33: 0x18e2, 0xb34: 0x18e5, 0xb35: 0x18ee, + 0xb36: 0x18f1, 0xb37: 0x18f4, 0xb38: 0x18f7, 0xb39: 0x18fa, 0xb3a: 0x18fd, 0xb3b: 0x1900, + 0xb3c: 0x1903, 0xb3d: 0x1906, 0xb3e: 0x1909, 0xb3f: 0x1912, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1cc6, 0xb41: 0x1cd5, 0xb42: 0x1ce4, 0xb43: 0x1cf3, 0xb44: 0x1d02, 0xb45: 0x1d11, + 0xb46: 0x1d20, 0xb47: 0x1d2f, 0xb48: 0x1d3e, 0xb49: 0x218c, 0xb4a: 0x219e, 0xb4b: 0x21b0, + 0xb4c: 0x1954, 0xb4d: 0x1c04, 0xb4e: 0x19d2, 0xb4f: 0x1ba8, 0xb50: 0x04cb, 0xb51: 0x04d3, + 0xb52: 0x04db, 0xb53: 0x04e3, 0xb54: 0x04eb, 0xb55: 0x04ef, 0xb56: 0x04f3, 0xb57: 0x04f7, + 0xb58: 0x04fb, 0xb59: 0x04ff, 0xb5a: 0x0503, 0xb5b: 0x0507, 0xb5c: 0x050b, 0xb5d: 0x050f, + 0xb5e: 0x0513, 0xb5f: 0x0517, 0xb60: 0x051b, 0xb61: 0x0523, 0xb62: 0x0527, 0xb63: 0x052b, + 0xb64: 0x052f, 0xb65: 0x0533, 0xb66: 0x0537, 0xb67: 0x053b, 0xb68: 0x053f, 0xb69: 0x0543, + 0xb6a: 0x0547, 0xb6b: 0x054b, 0xb6c: 0x054f, 0xb6d: 0x0553, 0xb6e: 0x0557, 0xb6f: 0x055b, + 0xb70: 0x055f, 0xb71: 0x0563, 0xb72: 0x0567, 0xb73: 0x056f, 0xb74: 0x0577, 0xb75: 0x057f, + 0xb76: 0x0583, 0xb77: 0x0587, 0xb78: 0x058b, 0xb79: 0x058f, 0xb7a: 0x0593, 0xb7b: 0x0597, + 0xb7c: 0x059b, 0xb7d: 0x059f, 0xb7e: 0x05a3, + // Block 0x2e, offset 0xb80 + 0xb80: 0x2b0c, 0xb81: 0x29a8, 0xb82: 0x2b1c, 0xb83: 0x2880, 0xb84: 0x2ee4, 0xb85: 0x288a, + 0xb86: 0x2894, 0xb87: 0x2f28, 0xb88: 0x29b5, 0xb89: 0x289e, 0xb8a: 0x28a8, 0xb8b: 0x28b2, + 0xb8c: 0x29dc, 0xb8d: 0x29e9, 0xb8e: 0x29c2, 0xb8f: 0x29cf, 0xb90: 0x2ea9, 0xb91: 0x29f6, + 0xb92: 0x2a03, 0xb93: 0x2bbe, 0xb94: 0x26bb, 0xb95: 0x2bd1, 0xb96: 0x2be4, 0xb97: 0x2b2c, + 0xb98: 0x2a10, 0xb99: 0x2bf7, 0xb9a: 0x2c0a, 0xb9b: 0x2a1d, 0xb9c: 0x28bc, 0xb9d: 0x28c6, + 0xb9e: 0x2eb7, 0xb9f: 0x2a2a, 0xba0: 0x2b3c, 0xba1: 0x2ef5, 0xba2: 0x28d0, 0xba3: 0x28da, + 0xba4: 0x2a37, 0xba5: 0x28e4, 0xba6: 0x28ee, 0xba7: 0x26d0, 0xba8: 0x26d7, 0xba9: 0x28f8, + 0xbaa: 0x2902, 0xbab: 0x2c1d, 0xbac: 0x2a44, 0xbad: 0x2b4c, 0xbae: 0x2c30, 0xbaf: 0x2a51, + 0xbb0: 0x2916, 0xbb1: 0x290c, 0xbb2: 0x2f3c, 
0xbb3: 0x2a5e, 0xbb4: 0x2c43, 0xbb5: 0x2920, + 0xbb6: 0x2b5c, 0xbb7: 0x292a, 0xbb8: 0x2a78, 0xbb9: 0x2934, 0xbba: 0x2a85, 0xbbb: 0x2f06, + 0xbbc: 0x2a6b, 0xbbd: 0x2b6c, 0xbbe: 0x2a92, 0xbbf: 0x26de, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2f17, 0xbc1: 0x293e, 0xbc2: 0x2948, 0xbc3: 0x2a9f, 0xbc4: 0x2952, 0xbc5: 0x295c, + 0xbc6: 0x2966, 0xbc7: 0x2b7c, 0xbc8: 0x2aac, 0xbc9: 0x26e5, 0xbca: 0x2c56, 0xbcb: 0x2e90, + 0xbcc: 0x2b8c, 0xbcd: 0x2ab9, 0xbce: 0x2ec5, 0xbcf: 0x2970, 0xbd0: 0x297a, 0xbd1: 0x2ac6, + 0xbd2: 0x26ec, 0xbd3: 0x2ad3, 0xbd4: 0x2b9c, 0xbd5: 0x26f3, 0xbd6: 0x2c69, 0xbd7: 0x2984, + 0xbd8: 0x1cb7, 0xbd9: 0x1ccb, 0xbda: 0x1cda, 0xbdb: 0x1ce9, 0xbdc: 0x1cf8, 0xbdd: 0x1d07, + 0xbde: 0x1d16, 0xbdf: 0x1d25, 0xbe0: 0x1d34, 0xbe1: 0x1d43, 0xbe2: 0x2192, 0xbe3: 0x21a4, + 0xbe4: 0x21b6, 0xbe5: 0x21c2, 0xbe6: 0x21ce, 0xbe7: 0x21da, 0xbe8: 0x21e6, 0xbe9: 0x21f2, + 0xbea: 0x21fe, 0xbeb: 0x220a, 0xbec: 0x2246, 0xbed: 0x2252, 0xbee: 0x225e, 0xbef: 0x226a, + 0xbf0: 0x2276, 0xbf1: 0x1c14, 0xbf2: 0x19c6, 0xbf3: 0x1936, 0xbf4: 0x1be4, 0xbf5: 0x1a47, + 0xbf6: 0x1a56, 0xbf7: 0x19cc, 0xbf8: 0x1bfc, 0xbf9: 0x1c00, 0xbfa: 0x1960, 0xbfb: 0x2701, + 0xbfc: 0x270f, 0xbfd: 0x26fa, 0xbfe: 0x2708, 0xbff: 0x2ae0, + // Block 0x30, offset 0xc00 + 0xc00: 0x1a4a, 0xc01: 0x1a32, 0xc02: 0x1c60, 0xc03: 0x1a1a, 0xc04: 0x19f3, 0xc05: 0x1969, + 0xc06: 0x1978, 0xc07: 0x1948, 0xc08: 0x1bf0, 0xc09: 0x1d52, 0xc0a: 0x1a4d, 0xc0b: 0x1a35, + 0xc0c: 0x1c64, 0xc0d: 0x1c70, 0xc0e: 0x1a26, 0xc0f: 0x19fc, 0xc10: 0x1957, 0xc11: 0x1c1c, + 0xc12: 0x1bb0, 0xc13: 0x1b9c, 0xc14: 0x1bcc, 0xc15: 0x1c74, 0xc16: 0x1a29, 0xc17: 0x19c9, + 0xc18: 0x19ff, 0xc19: 0x19de, 0xc1a: 0x1a41, 0xc1b: 0x1c78, 0xc1c: 0x1a2c, 0xc1d: 0x19c0, + 0xc1e: 0x1a02, 0xc1f: 0x1c3c, 0xc20: 0x1bf4, 0xc21: 0x1a14, 0xc22: 0x1c24, 0xc23: 0x1c40, + 0xc24: 0x1bf8, 0xc25: 0x1a17, 0xc26: 0x1c28, 0xc27: 0x22e8, 0xc28: 0x22fc, 0xc29: 0x1996, + 0xc2a: 0x1c20, 0xc2b: 0x1bb4, 0xc2c: 0x1ba0, 0xc2d: 0x1c48, 0xc2e: 0x2716, 0xc2f: 0x27ad, + 0xc30: 0x1a59, 0xc31: 0x1a44, 0xc32: 0x1c7c, 0xc33: 0x1a2f, 0xc34: 0x1a50, 0xc35: 0x1a38, + 0xc36: 0x1c68, 0xc37: 0x1a1d, 0xc38: 0x19f6, 0xc39: 0x1981, 0xc3a: 0x1a53, 0xc3b: 0x1a3b, + 0xc3c: 0x1c6c, 0xc3d: 0x1a20, 0xc3e: 0x19f9, 0xc3f: 0x1984, + // Block 0x31, offset 0xc40 + 0xc40: 0x1c2c, 0xc41: 0x1bb8, 0xc42: 0x1d4d, 0xc43: 0x1939, 0xc44: 0x19ba, 0xc45: 0x19bd, + 0xc46: 0x22f5, 0xc47: 0x1b94, 0xc48: 0x19c3, 0xc49: 0x194b, 0xc4a: 0x19e1, 0xc4b: 0x194e, + 0xc4c: 0x19ea, 0xc4d: 0x196c, 0xc4e: 0x196f, 0xc4f: 0x1a05, 0xc50: 0x1a0b, 0xc51: 0x1a0e, + 0xc52: 0x1c30, 0xc53: 0x1a11, 0xc54: 0x1a23, 0xc55: 0x1c38, 0xc56: 0x1c44, 0xc57: 0x1990, + 0xc58: 0x1d57, 0xc59: 0x1bbc, 0xc5a: 0x1993, 0xc5b: 0x1a5c, 0xc5c: 0x19a5, 0xc5d: 0x19b4, + 0xc5e: 0x22e2, 0xc5f: 0x22dc, 0xc60: 0x1cc1, 0xc61: 0x1cd0, 0xc62: 0x1cdf, 0xc63: 0x1cee, + 0xc64: 0x1cfd, 0xc65: 0x1d0c, 0xc66: 0x1d1b, 0xc67: 0x1d2a, 0xc68: 0x1d39, 0xc69: 0x2186, + 0xc6a: 0x2198, 0xc6b: 0x21aa, 0xc6c: 0x21bc, 0xc6d: 0x21c8, 0xc6e: 0x21d4, 0xc6f: 0x21e0, + 0xc70: 0x21ec, 0xc71: 0x21f8, 0xc72: 0x2204, 0xc73: 0x2240, 0xc74: 0x224c, 0xc75: 0x2258, + 0xc76: 0x2264, 0xc77: 0x2270, 0xc78: 0x227c, 0xc79: 0x2282, 0xc7a: 0x2288, 0xc7b: 0x228e, + 0xc7c: 0x2294, 0xc7d: 0x22a6, 0xc7e: 0x22ac, 0xc7f: 0x1c10, + // Block 0x32, offset 0xc80 + 0xc80: 0x1377, 0xc81: 0x0cfb, 0xc82: 0x13d3, 0xc83: 0x139f, 0xc84: 0x0e57, 0xc85: 0x06eb, + 0xc86: 0x08df, 0xc87: 0x162b, 0xc88: 0x162b, 0xc89: 0x0a0b, 0xc8a: 0x145f, 0xc8b: 0x0943, + 0xc8c: 0x0a07, 0xc8d: 0x0bef, 0xc8e: 0x0fcf, 0xc8f: 0x115f, 0xc90: 0x1297, 0xc91: 0x12d3, + 0xc92: 
0x1307, 0xc93: 0x141b, 0xc94: 0x0d73, 0xc95: 0x0dff, 0xc96: 0x0eab, 0xc97: 0x0f43, + 0xc98: 0x125f, 0xc99: 0x1447, 0xc9a: 0x1573, 0xc9b: 0x070f, 0xc9c: 0x08b3, 0xc9d: 0x0d87, + 0xc9e: 0x0ecf, 0xc9f: 0x1293, 0xca0: 0x15c3, 0xca1: 0x0ab3, 0xca2: 0x0e77, 0xca3: 0x1283, + 0xca4: 0x1317, 0xca5: 0x0c23, 0xca6: 0x11bb, 0xca7: 0x12df, 0xca8: 0x0b1f, 0xca9: 0x0d0f, + 0xcaa: 0x0e17, 0xcab: 0x0f1b, 0xcac: 0x1427, 0xcad: 0x074f, 0xcae: 0x07e7, 0xcaf: 0x0853, + 0xcb0: 0x0c8b, 0xcb1: 0x0d7f, 0xcb2: 0x0ecb, 0xcb3: 0x0fef, 0xcb4: 0x1177, 0xcb5: 0x128b, + 0xcb6: 0x12a3, 0xcb7: 0x13c7, 0xcb8: 0x14ef, 0xcb9: 0x15a3, 0xcba: 0x15bf, 0xcbb: 0x102b, + 0xcbc: 0x106b, 0xcbd: 0x1123, 0xcbe: 0x1243, 0xcbf: 0x147b, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x15cb, 0xcc1: 0x134b, 0xcc2: 0x09c7, 0xcc3: 0x0b3b, 0xcc4: 0x10db, 0xcc5: 0x119b, + 0xcc6: 0x0eff, 0xcc7: 0x1033, 0xcc8: 0x1397, 0xcc9: 0x14e7, 0xcca: 0x09c3, 0xccb: 0x0a8f, + 0xccc: 0x0d77, 0xccd: 0x0e2b, 0xcce: 0x0e5f, 0xccf: 0x1113, 0xcd0: 0x113b, 0xcd1: 0x14a7, + 0xcd2: 0x084f, 0xcd3: 0x11a7, 0xcd4: 0x07f3, 0xcd5: 0x07ef, 0xcd6: 0x1097, 0xcd7: 0x1127, + 0xcd8: 0x125b, 0xcd9: 0x14af, 0xcda: 0x1367, 0xcdb: 0x0c27, 0xcdc: 0x0d73, 0xcdd: 0x1357, + 0xcde: 0x06f7, 0xcdf: 0x0a63, 0xce0: 0x0b93, 0xce1: 0x0f2f, 0xce2: 0x0faf, 0xce3: 0x0873, + 0xce4: 0x103b, 0xce5: 0x075f, 0xce6: 0x0b77, 0xce7: 0x06d7, 0xce8: 0x0deb, 0xce9: 0x0ca3, + 0xcea: 0x110f, 0xceb: 0x08c7, 0xcec: 0x09b3, 0xced: 0x0ffb, 0xcee: 0x1263, 0xcef: 0x133b, + 0xcf0: 0x0db7, 0xcf1: 0x13f7, 0xcf2: 0x0de3, 0xcf3: 0x0c37, 0xcf4: 0x121b, 0xcf5: 0x0c57, + 0xcf6: 0x0fab, 0xcf7: 0x072b, 0xcf8: 0x07a7, 0xcf9: 0x07eb, 0xcfa: 0x0d53, 0xcfb: 0x10fb, + 0xcfc: 0x11f3, 0xcfd: 0x1347, 0xcfe: 0x145b, 0xcff: 0x085b, + // Block 0x34, offset 0xd00 + 0xd00: 0x090f, 0xd01: 0x0a17, 0xd02: 0x0b2f, 0xd03: 0x0cbf, 0xd04: 0x0e7b, 0xd05: 0x103f, + 0xd06: 0x1497, 0xd07: 0x157b, 0xd08: 0x15cf, 0xd09: 0x15e7, 0xd0a: 0x0837, 0xd0b: 0x0cf3, + 0xd0c: 0x0da3, 0xd0d: 0x13eb, 0xd0e: 0x0afb, 0xd0f: 0x0bd7, 0xd10: 0x0bf3, 0xd11: 0x0c83, + 0xd12: 0x0e6b, 0xd13: 0x0eb7, 0xd14: 0x0f67, 0xd15: 0x108b, 0xd16: 0x112f, 0xd17: 0x1193, + 0xd18: 0x13db, 0xd19: 0x126b, 0xd1a: 0x1403, 0xd1b: 0x147f, 0xd1c: 0x080f, 0xd1d: 0x083b, + 0xd1e: 0x0923, 0xd1f: 0x0ea7, 0xd20: 0x12f3, 0xd21: 0x133b, 0xd22: 0x0b1b, 0xd23: 0x0b8b, + 0xd24: 0x0c4f, 0xd25: 0x0daf, 0xd26: 0x10d7, 0xd27: 0x0f23, 0xd28: 0x073b, 0xd29: 0x097f, + 0xd2a: 0x0a63, 0xd2b: 0x0ac7, 0xd2c: 0x0b97, 0xd2d: 0x0f3f, 0xd2e: 0x0f5b, 0xd2f: 0x116b, + 0xd30: 0x118b, 0xd31: 0x1463, 0xd32: 0x14e3, 0xd33: 0x14f3, 0xd34: 0x152f, 0xd35: 0x0753, + 0xd36: 0x107f, 0xd37: 0x144f, 0xd38: 0x14cb, 0xd39: 0x0baf, 0xd3a: 0x0717, 0xd3b: 0x0777, + 0xd3c: 0x0a67, 0xd3d: 0x0a87, 0xd3e: 0x0caf, 0xd3f: 0x0d73, + // Block 0x35, offset 0xd40 + 0xd40: 0x0ec3, 0xd41: 0x0fcb, 0xd42: 0x1277, 0xd43: 0x1417, 0xd44: 0x1623, 0xd45: 0x0ce3, + 0xd46: 0x14a3, 0xd47: 0x0833, 0xd48: 0x0d2f, 0xd49: 0x0d3b, 0xd4a: 0x0e0f, 0xd4b: 0x0e47, + 0xd4c: 0x0f4b, 0xd4d: 0x0fa7, 0xd4e: 0x1027, 0xd4f: 0x110b, 0xd50: 0x153b, 0xd51: 0x07af, + 0xd52: 0x0c03, 0xd53: 0x14b3, 0xd54: 0x0767, 0xd55: 0x0aab, 0xd56: 0x0e2f, 0xd57: 0x13df, + 0xd58: 0x0b67, 0xd59: 0x0bb7, 0xd5a: 0x0d43, 0xd5b: 0x0f2f, 0xd5c: 0x14bb, 0xd5d: 0x0817, + 0xd5e: 0x08ff, 0xd5f: 0x0a97, 0xd60: 0x0cd3, 0xd61: 0x0d1f, 0xd62: 0x0d5f, 0xd63: 0x0df3, + 0xd64: 0x0f47, 0xd65: 0x0fbb, 0xd66: 0x1157, 0xd67: 0x12f7, 0xd68: 0x1303, 0xd69: 0x1457, + 0xd6a: 0x14d7, 0xd6b: 0x0883, 0xd6c: 0x0e4b, 0xd6d: 0x0903, 0xd6e: 0x0ec7, 0xd6f: 0x0f6b, + 0xd70: 0x1287, 0xd71: 0x14bf, 0xd72: 0x15ab, 0xd73: 0x15d3, 
[… generated data elided: the remaining nfkc value blocks (through Block 0x5c), several thousand machine-generated entries with no hand-written content …]
+}
+
+// nfkcIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var nfkcIndex = [1408]uint8{
[… generated data elided: index blocks 0x0 through 0x15, mapping UTF-8 bytes to block numbers …]
+}
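For orientation, nfkcIndex is the first stage of a two-stage trie: 22 blocks of 64 entries each, walked one UTF-8 byte at a time, with Block 0 kept all-zero so unmapped bytes resolve to zero. The number read out of the index selects a 64-entry block in the companion values array (or, past a generator-chosen threshold, one of the sparse blocks below). A minimal self-contained sketch of that two-stage shape, using invented toy data rather than the generated tables (the real generator also pre-biases block numbers so raw continuation bytes can be added directly):

package main

import "fmt"

// Toy two-stage trie in the shape of nfkcIndex/nfkcValues: stage one maps a
// byte to a block number, stage two reads a value out of a 64-entry block.
// All data here is invented for illustration.
func main() {
	index := make([]uint8, 2*64)   // stage one: blocks of 64 block numbers
	values := make([]uint16, 2*64) // stage two: 64-entry value blocks

	index[0x42] = 1            // byte 0x42 selects value block 1
	values[1<<6|0x05] = 0x1f41 // entry 0x05 of block 1

	b0, b1 := byte(0x42), byte(0x05)
	block := uint32(index[b0])
	fmt.Printf("%#04x\n", values[block<<6|uint32(b1)]) // prints 0x1f41
}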
+// nfkcSparseOffset: 158 entries, 316 bytes
+var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, [151 further offsets elided], 0x34f, 0x359, 0x35b}
+
+// nfkcSparseValues: 869 entries, 3476 bytes
+var nfkcSparseValues = [869]valueRange{
+	// Block 0x0, offset 0x0
+	{value: 0x0002, lo: 0x0d},
+	{value: 0x0001, lo: 0xa0, hi: 0xa0},
+	{value: 0x4278, lo: 0xa8, hi: 0xa8},
[… generated data elided: the remaining 11 ranges of Block 0x0, then sparse value blocks 0x1 through 0x74; the hunk breaks off mid-table at "// Block 0x75, offset" …]
0x27f + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x76, offset 0x281 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x77, offset 0x289 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + // Block 0x78, offset 0x28c + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x79, offset 0x293 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7a, offset 0x296 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7c, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x2a0 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x7e, offset 0x2a3 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x7f, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x80, offset 0x2a7 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x81, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x82, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x83, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x84, offset 0x2b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x85, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x86, offset 0x2b4 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x87, offset 0x2c1 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x88, offset 0x2cb + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x89, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8a, offset 0x2cf + 
{value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x8b, offset 0x2d3 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x8c, offset 0x2d8 + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x8d, offset 0x2e4 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x8e, offset 0x2e9 + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x8f, offset 0x2f2 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x90, offset 0x2f8 + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x91, offset 0x2fd + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x92, offset 0x301 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x93, offset 0x306 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x94, offset 0x30a + {value: 0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x31a + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 
0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x96, offset 0x328 + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x336 + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x98, offset 0x344 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x99, offset 0x34a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x9a, offset 0x34c + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x9b, offset 0x34f + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0x9c, offset 0x359 + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0x9d, offset 0x35b + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + {value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap = map[uint32]rune{ + 0x00410300: 0x00C0, + 0x00410301: 0x00C1, + 0x00410302: 0x00C2, + 0x00410303: 0x00C3, + 0x00410308: 0x00C4, + 0x0041030A: 0x00C5, + 0x00430327: 0x00C7, + 0x00450300: 0x00C8, + 0x00450301: 0x00C9, + 0x00450302: 0x00CA, + 0x00450308: 0x00CB, + 0x00490300: 0x00CC, + 0x00490301: 0x00CD, + 0x00490302: 0x00CE, + 0x00490308: 0x00CF, + 0x004E0303: 0x00D1, + 0x004F0300: 0x00D2, + 0x004F0301: 0x00D3, + 0x004F0302: 0x00D4, + 0x004F0303: 0x00D5, + 0x004F0308: 0x00D6, + 0x00550300: 0x00D9, + 0x00550301: 0x00DA, + 0x00550302: 0x00DB, + 0x00550308: 0x00DC, + 0x00590301: 0x00DD, + 0x00610300: 0x00E0, + 0x00610301: 0x00E1, + 0x00610302: 0x00E2, + 0x00610303: 0x00E3, + 0x00610308: 0x00E4, + 0x0061030A: 0x00E5, + 0x00630327: 0x00E7, + 0x00650300: 0x00E8, + 0x00650301: 0x00E9, + 0x00650302: 0x00EA, + 0x00650308: 
0x00EB, + 0x00690300: 0x00EC, + 0x00690301: 0x00ED, + 0x00690302: 0x00EE, + 0x00690308: 0x00EF, + 0x006E0303: 0x00F1, + 0x006F0300: 0x00F2, + 0x006F0301: 0x00F3, + 0x006F0302: 0x00F4, + 0x006F0303: 0x00F5, + 0x006F0308: 0x00F6, + 0x00750300: 0x00F9, + 0x00750301: 0x00FA, + 0x00750302: 0x00FB, + 0x00750308: 0x00FC, + 0x00790301: 0x00FD, + 0x00790308: 0x00FF, + 0x00410304: 0x0100, + 0x00610304: 0x0101, + 0x00410306: 0x0102, + 0x00610306: 0x0103, + 0x00410328: 0x0104, + 0x00610328: 0x0105, + 0x00430301: 0x0106, + 0x00630301: 0x0107, + 0x00430302: 0x0108, + 0x00630302: 0x0109, + 0x00430307: 0x010A, + 0x00630307: 0x010B, + 0x0043030C: 0x010C, + 0x0063030C: 0x010D, + 0x0044030C: 0x010E, + 0x0064030C: 0x010F, + 0x00450304: 0x0112, + 0x00650304: 0x0113, + 0x00450306: 0x0114, + 0x00650306: 0x0115, + 0x00450307: 0x0116, + 0x00650307: 0x0117, + 0x00450328: 0x0118, + 0x00650328: 0x0119, + 0x0045030C: 0x011A, + 0x0065030C: 0x011B, + 0x00470302: 0x011C, + 0x00670302: 0x011D, + 0x00470306: 0x011E, + 0x00670306: 0x011F, + 0x00470307: 0x0120, + 0x00670307: 0x0121, + 0x00470327: 0x0122, + 0x00670327: 0x0123, + 0x00480302: 0x0124, + 0x00680302: 0x0125, + 0x00490303: 0x0128, + 0x00690303: 0x0129, + 0x00490304: 0x012A, + 0x00690304: 0x012B, + 0x00490306: 0x012C, + 0x00690306: 0x012D, + 0x00490328: 0x012E, + 0x00690328: 0x012F, + 0x00490307: 0x0130, + 0x004A0302: 0x0134, + 0x006A0302: 0x0135, + 0x004B0327: 0x0136, + 0x006B0327: 0x0137, + 0x004C0301: 0x0139, + 0x006C0301: 0x013A, + 0x004C0327: 0x013B, + 0x006C0327: 0x013C, + 0x004C030C: 0x013D, + 0x006C030C: 0x013E, + 0x004E0301: 0x0143, + 0x006E0301: 0x0144, + 0x004E0327: 0x0145, + 0x006E0327: 0x0146, + 0x004E030C: 0x0147, + 0x006E030C: 0x0148, + 0x004F0304: 0x014C, + 0x006F0304: 0x014D, + 0x004F0306: 0x014E, + 0x006F0306: 0x014F, + 0x004F030B: 0x0150, + 0x006F030B: 0x0151, + 0x00520301: 0x0154, + 0x00720301: 0x0155, + 0x00520327: 0x0156, + 0x00720327: 0x0157, + 0x0052030C: 0x0158, + 0x0072030C: 0x0159, + 0x00530301: 0x015A, + 0x00730301: 0x015B, + 0x00530302: 0x015C, + 0x00730302: 0x015D, + 0x00530327: 0x015E, + 0x00730327: 0x015F, + 0x0053030C: 0x0160, + 0x0073030C: 0x0161, + 0x00540327: 0x0162, + 0x00740327: 0x0163, + 0x0054030C: 0x0164, + 0x0074030C: 0x0165, + 0x00550303: 0x0168, + 0x00750303: 0x0169, + 0x00550304: 0x016A, + 0x00750304: 0x016B, + 0x00550306: 0x016C, + 0x00750306: 0x016D, + 0x0055030A: 0x016E, + 0x0075030A: 0x016F, + 0x0055030B: 0x0170, + 0x0075030B: 0x0171, + 0x00550328: 0x0172, + 0x00750328: 0x0173, + 0x00570302: 0x0174, + 0x00770302: 0x0175, + 0x00590302: 0x0176, + 0x00790302: 0x0177, + 0x00590308: 0x0178, + 0x005A0301: 0x0179, + 0x007A0301: 0x017A, + 0x005A0307: 0x017B, + 0x007A0307: 0x017C, + 0x005A030C: 0x017D, + 0x007A030C: 0x017E, + 0x004F031B: 0x01A0, + 0x006F031B: 0x01A1, + 0x0055031B: 0x01AF, + 0x0075031B: 0x01B0, + 0x0041030C: 0x01CD, + 0x0061030C: 0x01CE, + 0x0049030C: 0x01CF, + 0x0069030C: 0x01D0, + 0x004F030C: 0x01D1, + 0x006F030C: 0x01D2, + 0x0055030C: 0x01D3, + 0x0075030C: 0x01D4, + 0x00DC0304: 0x01D5, + 0x00FC0304: 0x01D6, + 0x00DC0301: 0x01D7, + 0x00FC0301: 0x01D8, + 0x00DC030C: 0x01D9, + 0x00FC030C: 0x01DA, + 0x00DC0300: 0x01DB, + 0x00FC0300: 0x01DC, + 0x00C40304: 0x01DE, + 0x00E40304: 0x01DF, + 0x02260304: 0x01E0, + 0x02270304: 0x01E1, + 0x00C60304: 0x01E2, + 0x00E60304: 0x01E3, + 0x0047030C: 0x01E6, + 0x0067030C: 0x01E7, + 0x004B030C: 0x01E8, + 0x006B030C: 0x01E9, + 0x004F0328: 0x01EA, + 0x006F0328: 0x01EB, + 0x01EA0304: 0x01EC, + 0x01EB0304: 0x01ED, + 0x01B7030C: 0x01EE, + 0x0292030C: 0x01EF, + 0x006A030C: 0x01F0, + 
0x00470301: 0x01F4, + 0x00670301: 0x01F5, + 0x004E0300: 0x01F8, + 0x006E0300: 0x01F9, + 0x00C50301: 0x01FA, + 0x00E50301: 0x01FB, + 0x00C60301: 0x01FC, + 0x00E60301: 0x01FD, + 0x00D80301: 0x01FE, + 0x00F80301: 0x01FF, + 0x0041030F: 0x0200, + 0x0061030F: 0x0201, + 0x00410311: 0x0202, + 0x00610311: 0x0203, + 0x0045030F: 0x0204, + 0x0065030F: 0x0205, + 0x00450311: 0x0206, + 0x00650311: 0x0207, + 0x0049030F: 0x0208, + 0x0069030F: 0x0209, + 0x00490311: 0x020A, + 0x00690311: 0x020B, + 0x004F030F: 0x020C, + 0x006F030F: 0x020D, + 0x004F0311: 0x020E, + 0x006F0311: 0x020F, + 0x0052030F: 0x0210, + 0x0072030F: 0x0211, + 0x00520311: 0x0212, + 0x00720311: 0x0213, + 0x0055030F: 0x0214, + 0x0075030F: 0x0215, + 0x00550311: 0x0216, + 0x00750311: 0x0217, + 0x00530326: 0x0218, + 0x00730326: 0x0219, + 0x00540326: 0x021A, + 0x00740326: 0x021B, + 0x0048030C: 0x021E, + 0x0068030C: 0x021F, + 0x00410307: 0x0226, + 0x00610307: 0x0227, + 0x00450327: 0x0228, + 0x00650327: 0x0229, + 0x00D60304: 0x022A, + 0x00F60304: 0x022B, + 0x00D50304: 0x022C, + 0x00F50304: 0x022D, + 0x004F0307: 0x022E, + 0x006F0307: 0x022F, + 0x022E0304: 0x0230, + 0x022F0304: 0x0231, + 0x00590304: 0x0232, + 0x00790304: 0x0233, + 0x00A80301: 0x0385, + 0x03910301: 0x0386, + 0x03950301: 0x0388, + 0x03970301: 0x0389, + 0x03990301: 0x038A, + 0x039F0301: 0x038C, + 0x03A50301: 0x038E, + 0x03A90301: 0x038F, + 0x03CA0301: 0x0390, + 0x03990308: 0x03AA, + 0x03A50308: 0x03AB, + 0x03B10301: 0x03AC, + 0x03B50301: 0x03AD, + 0x03B70301: 0x03AE, + 0x03B90301: 0x03AF, + 0x03CB0301: 0x03B0, + 0x03B90308: 0x03CA, + 0x03C50308: 0x03CB, + 0x03BF0301: 0x03CC, + 0x03C50301: 0x03CD, + 0x03C90301: 0x03CE, + 0x03D20301: 0x03D3, + 0x03D20308: 0x03D4, + 0x04150300: 0x0400, + 0x04150308: 0x0401, + 0x04130301: 0x0403, + 0x04060308: 0x0407, + 0x041A0301: 0x040C, + 0x04180300: 0x040D, + 0x04230306: 0x040E, + 0x04180306: 0x0419, + 0x04380306: 0x0439, + 0x04350300: 0x0450, + 0x04350308: 0x0451, + 0x04330301: 0x0453, + 0x04560308: 0x0457, + 0x043A0301: 0x045C, + 0x04380300: 0x045D, + 0x04430306: 0x045E, + 0x0474030F: 0x0476, + 0x0475030F: 0x0477, + 0x04160306: 0x04C1, + 0x04360306: 0x04C2, + 0x04100306: 0x04D0, + 0x04300306: 0x04D1, + 0x04100308: 0x04D2, + 0x04300308: 0x04D3, + 0x04150306: 0x04D6, + 0x04350306: 0x04D7, + 0x04D80308: 0x04DA, + 0x04D90308: 0x04DB, + 0x04160308: 0x04DC, + 0x04360308: 0x04DD, + 0x04170308: 0x04DE, + 0x04370308: 0x04DF, + 0x04180304: 0x04E2, + 0x04380304: 0x04E3, + 0x04180308: 0x04E4, + 0x04380308: 0x04E5, + 0x041E0308: 0x04E6, + 0x043E0308: 0x04E7, + 0x04E80308: 0x04EA, + 0x04E90308: 0x04EB, + 0x042D0308: 0x04EC, + 0x044D0308: 0x04ED, + 0x04230304: 0x04EE, + 0x04430304: 0x04EF, + 0x04230308: 0x04F0, + 0x04430308: 0x04F1, + 0x0423030B: 0x04F2, + 0x0443030B: 0x04F3, + 0x04270308: 0x04F4, + 0x04470308: 0x04F5, + 0x042B0308: 0x04F8, + 0x044B0308: 0x04F9, + 0x06270653: 0x0622, + 0x06270654: 0x0623, + 0x06480654: 0x0624, + 0x06270655: 0x0625, + 0x064A0654: 0x0626, + 0x06D50654: 0x06C0, + 0x06C10654: 0x06C2, + 0x06D20654: 0x06D3, + 0x0928093C: 0x0929, + 0x0930093C: 0x0931, + 0x0933093C: 0x0934, + 0x09C709BE: 0x09CB, + 0x09C709D7: 0x09CC, + 0x0B470B56: 0x0B48, + 0x0B470B3E: 0x0B4B, + 0x0B470B57: 0x0B4C, + 0x0B920BD7: 0x0B94, + 0x0BC60BBE: 0x0BCA, + 0x0BC70BBE: 0x0BCB, + 0x0BC60BD7: 0x0BCC, + 0x0C460C56: 0x0C48, + 0x0CBF0CD5: 0x0CC0, + 0x0CC60CD5: 0x0CC7, + 0x0CC60CD6: 0x0CC8, + 0x0CC60CC2: 0x0CCA, + 0x0CCA0CD5: 0x0CCB, + 0x0D460D3E: 0x0D4A, + 0x0D470D3E: 0x0D4B, + 0x0D460D57: 0x0D4C, + 0x0DD90DCA: 0x0DDA, + 0x0DD90DCF: 0x0DDC, + 0x0DDC0DCA: 0x0DDD, + 0x0DD90DDF: 
0x0DDE, + 0x1025102E: 0x1026, + 0x1B051B35: 0x1B06, + 0x1B071B35: 0x1B08, + 0x1B091B35: 0x1B0A, + 0x1B0B1B35: 0x1B0C, + 0x1B0D1B35: 0x1B0E, + 0x1B111B35: 0x1B12, + 0x1B3A1B35: 0x1B3B, + 0x1B3C1B35: 0x1B3D, + 0x1B3E1B35: 0x1B40, + 0x1B3F1B35: 0x1B41, + 0x1B421B35: 0x1B43, + 0x00410325: 0x1E00, + 0x00610325: 0x1E01, + 0x00420307: 0x1E02, + 0x00620307: 0x1E03, + 0x00420323: 0x1E04, + 0x00620323: 0x1E05, + 0x00420331: 0x1E06, + 0x00620331: 0x1E07, + 0x00C70301: 0x1E08, + 0x00E70301: 0x1E09, + 0x00440307: 0x1E0A, + 0x00640307: 0x1E0B, + 0x00440323: 0x1E0C, + 0x00640323: 0x1E0D, + 0x00440331: 0x1E0E, + 0x00640331: 0x1E0F, + 0x00440327: 0x1E10, + 0x00640327: 0x1E11, + 0x0044032D: 0x1E12, + 0x0064032D: 0x1E13, + 0x01120300: 0x1E14, + 0x01130300: 0x1E15, + 0x01120301: 0x1E16, + 0x01130301: 0x1E17, + 0x0045032D: 0x1E18, + 0x0065032D: 0x1E19, + 0x00450330: 0x1E1A, + 0x00650330: 0x1E1B, + 0x02280306: 0x1E1C, + 0x02290306: 0x1E1D, + 0x00460307: 0x1E1E, + 0x00660307: 0x1E1F, + 0x00470304: 0x1E20, + 0x00670304: 0x1E21, + 0x00480307: 0x1E22, + 0x00680307: 0x1E23, + 0x00480323: 0x1E24, + 0x00680323: 0x1E25, + 0x00480308: 0x1E26, + 0x00680308: 0x1E27, + 0x00480327: 0x1E28, + 0x00680327: 0x1E29, + 0x0048032E: 0x1E2A, + 0x0068032E: 0x1E2B, + 0x00490330: 0x1E2C, + 0x00690330: 0x1E2D, + 0x00CF0301: 0x1E2E, + 0x00EF0301: 0x1E2F, + 0x004B0301: 0x1E30, + 0x006B0301: 0x1E31, + 0x004B0323: 0x1E32, + 0x006B0323: 0x1E33, + 0x004B0331: 0x1E34, + 0x006B0331: 0x1E35, + 0x004C0323: 0x1E36, + 0x006C0323: 0x1E37, + 0x1E360304: 0x1E38, + 0x1E370304: 0x1E39, + 0x004C0331: 0x1E3A, + 0x006C0331: 0x1E3B, + 0x004C032D: 0x1E3C, + 0x006C032D: 0x1E3D, + 0x004D0301: 0x1E3E, + 0x006D0301: 0x1E3F, + 0x004D0307: 0x1E40, + 0x006D0307: 0x1E41, + 0x004D0323: 0x1E42, + 0x006D0323: 0x1E43, + 0x004E0307: 0x1E44, + 0x006E0307: 0x1E45, + 0x004E0323: 0x1E46, + 0x006E0323: 0x1E47, + 0x004E0331: 0x1E48, + 0x006E0331: 0x1E49, + 0x004E032D: 0x1E4A, + 0x006E032D: 0x1E4B, + 0x00D50301: 0x1E4C, + 0x00F50301: 0x1E4D, + 0x00D50308: 0x1E4E, + 0x00F50308: 0x1E4F, + 0x014C0300: 0x1E50, + 0x014D0300: 0x1E51, + 0x014C0301: 0x1E52, + 0x014D0301: 0x1E53, + 0x00500301: 0x1E54, + 0x00700301: 0x1E55, + 0x00500307: 0x1E56, + 0x00700307: 0x1E57, + 0x00520307: 0x1E58, + 0x00720307: 0x1E59, + 0x00520323: 0x1E5A, + 0x00720323: 0x1E5B, + 0x1E5A0304: 0x1E5C, + 0x1E5B0304: 0x1E5D, + 0x00520331: 0x1E5E, + 0x00720331: 0x1E5F, + 0x00530307: 0x1E60, + 0x00730307: 0x1E61, + 0x00530323: 0x1E62, + 0x00730323: 0x1E63, + 0x015A0307: 0x1E64, + 0x015B0307: 0x1E65, + 0x01600307: 0x1E66, + 0x01610307: 0x1E67, + 0x1E620307: 0x1E68, + 0x1E630307: 0x1E69, + 0x00540307: 0x1E6A, + 0x00740307: 0x1E6B, + 0x00540323: 0x1E6C, + 0x00740323: 0x1E6D, + 0x00540331: 0x1E6E, + 0x00740331: 0x1E6F, + 0x0054032D: 0x1E70, + 0x0074032D: 0x1E71, + 0x00550324: 0x1E72, + 0x00750324: 0x1E73, + 0x00550330: 0x1E74, + 0x00750330: 0x1E75, + 0x0055032D: 0x1E76, + 0x0075032D: 0x1E77, + 0x01680301: 0x1E78, + 0x01690301: 0x1E79, + 0x016A0308: 0x1E7A, + 0x016B0308: 0x1E7B, + 0x00560303: 0x1E7C, + 0x00760303: 0x1E7D, + 0x00560323: 0x1E7E, + 0x00760323: 0x1E7F, + 0x00570300: 0x1E80, + 0x00770300: 0x1E81, + 0x00570301: 0x1E82, + 0x00770301: 0x1E83, + 0x00570308: 0x1E84, + 0x00770308: 0x1E85, + 0x00570307: 0x1E86, + 0x00770307: 0x1E87, + 0x00570323: 0x1E88, + 0x00770323: 0x1E89, + 0x00580307: 0x1E8A, + 0x00780307: 0x1E8B, + 0x00580308: 0x1E8C, + 0x00780308: 0x1E8D, + 0x00590307: 0x1E8E, + 0x00790307: 0x1E8F, + 0x005A0302: 0x1E90, + 0x007A0302: 0x1E91, + 0x005A0323: 0x1E92, + 0x007A0323: 0x1E93, + 0x005A0331: 0x1E94, + 
0x007A0331: 0x1E95, + 0x00680331: 0x1E96, + 0x00740308: 0x1E97, + 0x0077030A: 0x1E98, + 0x0079030A: 0x1E99, + 0x017F0307: 0x1E9B, + 0x00410323: 0x1EA0, + 0x00610323: 0x1EA1, + 0x00410309: 0x1EA2, + 0x00610309: 0x1EA3, + 0x00C20301: 0x1EA4, + 0x00E20301: 0x1EA5, + 0x00C20300: 0x1EA6, + 0x00E20300: 0x1EA7, + 0x00C20309: 0x1EA8, + 0x00E20309: 0x1EA9, + 0x00C20303: 0x1EAA, + 0x00E20303: 0x1EAB, + 0x1EA00302: 0x1EAC, + 0x1EA10302: 0x1EAD, + 0x01020301: 0x1EAE, + 0x01030301: 0x1EAF, + 0x01020300: 0x1EB0, + 0x01030300: 0x1EB1, + 0x01020309: 0x1EB2, + 0x01030309: 0x1EB3, + 0x01020303: 0x1EB4, + 0x01030303: 0x1EB5, + 0x1EA00306: 0x1EB6, + 0x1EA10306: 0x1EB7, + 0x00450323: 0x1EB8, + 0x00650323: 0x1EB9, + 0x00450309: 0x1EBA, + 0x00650309: 0x1EBB, + 0x00450303: 0x1EBC, + 0x00650303: 0x1EBD, + 0x00CA0301: 0x1EBE, + 0x00EA0301: 0x1EBF, + 0x00CA0300: 0x1EC0, + 0x00EA0300: 0x1EC1, + 0x00CA0309: 0x1EC2, + 0x00EA0309: 0x1EC3, + 0x00CA0303: 0x1EC4, + 0x00EA0303: 0x1EC5, + 0x1EB80302: 0x1EC6, + 0x1EB90302: 0x1EC7, + 0x00490309: 0x1EC8, + 0x00690309: 0x1EC9, + 0x00490323: 0x1ECA, + 0x00690323: 0x1ECB, + 0x004F0323: 0x1ECC, + 0x006F0323: 0x1ECD, + 0x004F0309: 0x1ECE, + 0x006F0309: 0x1ECF, + 0x00D40301: 0x1ED0, + 0x00F40301: 0x1ED1, + 0x00D40300: 0x1ED2, + 0x00F40300: 0x1ED3, + 0x00D40309: 0x1ED4, + 0x00F40309: 0x1ED5, + 0x00D40303: 0x1ED6, + 0x00F40303: 0x1ED7, + 0x1ECC0302: 0x1ED8, + 0x1ECD0302: 0x1ED9, + 0x01A00301: 0x1EDA, + 0x01A10301: 0x1EDB, + 0x01A00300: 0x1EDC, + 0x01A10300: 0x1EDD, + 0x01A00309: 0x1EDE, + 0x01A10309: 0x1EDF, + 0x01A00303: 0x1EE0, + 0x01A10303: 0x1EE1, + 0x01A00323: 0x1EE2, + 0x01A10323: 0x1EE3, + 0x00550323: 0x1EE4, + 0x00750323: 0x1EE5, + 0x00550309: 0x1EE6, + 0x00750309: 0x1EE7, + 0x01AF0301: 0x1EE8, + 0x01B00301: 0x1EE9, + 0x01AF0300: 0x1EEA, + 0x01B00300: 0x1EEB, + 0x01AF0309: 0x1EEC, + 0x01B00309: 0x1EED, + 0x01AF0303: 0x1EEE, + 0x01B00303: 0x1EEF, + 0x01AF0323: 0x1EF0, + 0x01B00323: 0x1EF1, + 0x00590300: 0x1EF2, + 0x00790300: 0x1EF3, + 0x00590323: 0x1EF4, + 0x00790323: 0x1EF5, + 0x00590309: 0x1EF6, + 0x00790309: 0x1EF7, + 0x00590303: 0x1EF8, + 0x00790303: 0x1EF9, + 0x03B10313: 0x1F00, + 0x03B10314: 0x1F01, + 0x1F000300: 0x1F02, + 0x1F010300: 0x1F03, + 0x1F000301: 0x1F04, + 0x1F010301: 0x1F05, + 0x1F000342: 0x1F06, + 0x1F010342: 0x1F07, + 0x03910313: 0x1F08, + 0x03910314: 0x1F09, + 0x1F080300: 0x1F0A, + 0x1F090300: 0x1F0B, + 0x1F080301: 0x1F0C, + 0x1F090301: 0x1F0D, + 0x1F080342: 0x1F0E, + 0x1F090342: 0x1F0F, + 0x03B50313: 0x1F10, + 0x03B50314: 0x1F11, + 0x1F100300: 0x1F12, + 0x1F110300: 0x1F13, + 0x1F100301: 0x1F14, + 0x1F110301: 0x1F15, + 0x03950313: 0x1F18, + 0x03950314: 0x1F19, + 0x1F180300: 0x1F1A, + 0x1F190300: 0x1F1B, + 0x1F180301: 0x1F1C, + 0x1F190301: 0x1F1D, + 0x03B70313: 0x1F20, + 0x03B70314: 0x1F21, + 0x1F200300: 0x1F22, + 0x1F210300: 0x1F23, + 0x1F200301: 0x1F24, + 0x1F210301: 0x1F25, + 0x1F200342: 0x1F26, + 0x1F210342: 0x1F27, + 0x03970313: 0x1F28, + 0x03970314: 0x1F29, + 0x1F280300: 0x1F2A, + 0x1F290300: 0x1F2B, + 0x1F280301: 0x1F2C, + 0x1F290301: 0x1F2D, + 0x1F280342: 0x1F2E, + 0x1F290342: 0x1F2F, + 0x03B90313: 0x1F30, + 0x03B90314: 0x1F31, + 0x1F300300: 0x1F32, + 0x1F310300: 0x1F33, + 0x1F300301: 0x1F34, + 0x1F310301: 0x1F35, + 0x1F300342: 0x1F36, + 0x1F310342: 0x1F37, + 0x03990313: 0x1F38, + 0x03990314: 0x1F39, + 0x1F380300: 0x1F3A, + 0x1F390300: 0x1F3B, + 0x1F380301: 0x1F3C, + 0x1F390301: 0x1F3D, + 0x1F380342: 0x1F3E, + 0x1F390342: 0x1F3F, + 0x03BF0313: 0x1F40, + 0x03BF0314: 0x1F41, + 0x1F400300: 0x1F42, + 0x1F410300: 0x1F43, + 0x1F400301: 0x1F44, + 0x1F410301: 
0x1F45, + 0x039F0313: 0x1F48, + 0x039F0314: 0x1F49, + 0x1F480300: 0x1F4A, + 0x1F490300: 0x1F4B, + 0x1F480301: 0x1F4C, + 0x1F490301: 0x1F4D, + 0x03C50313: 0x1F50, + 0x03C50314: 0x1F51, + 0x1F500300: 0x1F52, + 0x1F510300: 0x1F53, + 0x1F500301: 0x1F54, + 0x1F510301: 0x1F55, + 0x1F500342: 0x1F56, + 0x1F510342: 0x1F57, + 0x03A50314: 0x1F59, + 0x1F590300: 0x1F5B, + 0x1F590301: 0x1F5D, + 0x1F590342: 0x1F5F, + 0x03C90313: 0x1F60, + 0x03C90314: 0x1F61, + 0x1F600300: 0x1F62, + 0x1F610300: 0x1F63, + 0x1F600301: 0x1F64, + 0x1F610301: 0x1F65, + 0x1F600342: 0x1F66, + 0x1F610342: 0x1F67, + 0x03A90313: 0x1F68, + 0x03A90314: 0x1F69, + 0x1F680300: 0x1F6A, + 0x1F690300: 0x1F6B, + 0x1F680301: 0x1F6C, + 0x1F690301: 0x1F6D, + 0x1F680342: 0x1F6E, + 0x1F690342: 0x1F6F, + 0x03B10300: 0x1F70, + 0x03B50300: 0x1F72, + 0x03B70300: 0x1F74, + 0x03B90300: 0x1F76, + 0x03BF0300: 0x1F78, + 0x03C50300: 0x1F7A, + 0x03C90300: 0x1F7C, + 0x1F000345: 0x1F80, + 0x1F010345: 0x1F81, + 0x1F020345: 0x1F82, + 0x1F030345: 0x1F83, + 0x1F040345: 0x1F84, + 0x1F050345: 0x1F85, + 0x1F060345: 0x1F86, + 0x1F070345: 0x1F87, + 0x1F080345: 0x1F88, + 0x1F090345: 0x1F89, + 0x1F0A0345: 0x1F8A, + 0x1F0B0345: 0x1F8B, + 0x1F0C0345: 0x1F8C, + 0x1F0D0345: 0x1F8D, + 0x1F0E0345: 0x1F8E, + 0x1F0F0345: 0x1F8F, + 0x1F200345: 0x1F90, + 0x1F210345: 0x1F91, + 0x1F220345: 0x1F92, + 0x1F230345: 0x1F93, + 0x1F240345: 0x1F94, + 0x1F250345: 0x1F95, + 0x1F260345: 0x1F96, + 0x1F270345: 0x1F97, + 0x1F280345: 0x1F98, + 0x1F290345: 0x1F99, + 0x1F2A0345: 0x1F9A, + 0x1F2B0345: 0x1F9B, + 0x1F2C0345: 0x1F9C, + 0x1F2D0345: 0x1F9D, + 0x1F2E0345: 0x1F9E, + 0x1F2F0345: 0x1F9F, + 0x1F600345: 0x1FA0, + 0x1F610345: 0x1FA1, + 0x1F620345: 0x1FA2, + 0x1F630345: 0x1FA3, + 0x1F640345: 0x1FA4, + 0x1F650345: 0x1FA5, + 0x1F660345: 0x1FA6, + 0x1F670345: 0x1FA7, + 0x1F680345: 0x1FA8, + 0x1F690345: 0x1FA9, + 0x1F6A0345: 0x1FAA, + 0x1F6B0345: 0x1FAB, + 0x1F6C0345: 0x1FAC, + 0x1F6D0345: 0x1FAD, + 0x1F6E0345: 0x1FAE, + 0x1F6F0345: 0x1FAF, + 0x03B10306: 0x1FB0, + 0x03B10304: 0x1FB1, + 0x1F700345: 0x1FB2, + 0x03B10345: 0x1FB3, + 0x03AC0345: 0x1FB4, + 0x03B10342: 0x1FB6, + 0x1FB60345: 0x1FB7, + 0x03910306: 0x1FB8, + 0x03910304: 0x1FB9, + 0x03910300: 0x1FBA, + 0x03910345: 0x1FBC, + 0x00A80342: 0x1FC1, + 0x1F740345: 0x1FC2, + 0x03B70345: 0x1FC3, + 0x03AE0345: 0x1FC4, + 0x03B70342: 0x1FC6, + 0x1FC60345: 0x1FC7, + 0x03950300: 0x1FC8, + 0x03970300: 0x1FCA, + 0x03970345: 0x1FCC, + 0x1FBF0300: 0x1FCD, + 0x1FBF0301: 0x1FCE, + 0x1FBF0342: 0x1FCF, + 0x03B90306: 0x1FD0, + 0x03B90304: 0x1FD1, + 0x03CA0300: 0x1FD2, + 0x03B90342: 0x1FD6, + 0x03CA0342: 0x1FD7, + 0x03990306: 0x1FD8, + 0x03990304: 0x1FD9, + 0x03990300: 0x1FDA, + 0x1FFE0300: 0x1FDD, + 0x1FFE0301: 0x1FDE, + 0x1FFE0342: 0x1FDF, + 0x03C50306: 0x1FE0, + 0x03C50304: 0x1FE1, + 0x03CB0300: 0x1FE2, + 0x03C10313: 0x1FE4, + 0x03C10314: 0x1FE5, + 0x03C50342: 0x1FE6, + 0x03CB0342: 0x1FE7, + 0x03A50306: 0x1FE8, + 0x03A50304: 0x1FE9, + 0x03A50300: 0x1FEA, + 0x03A10314: 0x1FEC, + 0x00A80300: 0x1FED, + 0x1F7C0345: 0x1FF2, + 0x03C90345: 0x1FF3, + 0x03CE0345: 0x1FF4, + 0x03C90342: 0x1FF6, + 0x1FF60345: 0x1FF7, + 0x039F0300: 0x1FF8, + 0x03A90300: 0x1FFA, + 0x03A90345: 0x1FFC, + 0x21900338: 0x219A, + 0x21920338: 0x219B, + 0x21940338: 0x21AE, + 0x21D00338: 0x21CD, + 0x21D40338: 0x21CE, + 0x21D20338: 0x21CF, + 0x22030338: 0x2204, + 0x22080338: 0x2209, + 0x220B0338: 0x220C, + 0x22230338: 0x2224, + 0x22250338: 0x2226, + 0x223C0338: 0x2241, + 0x22430338: 0x2244, + 0x22450338: 0x2247, + 0x22480338: 0x2249, + 0x003D0338: 0x2260, + 0x22610338: 0x2262, + 0x224D0338: 0x226D, + 
0x003C0338: 0x226E, + 0x003E0338: 0x226F, + 0x22640338: 0x2270, + 0x22650338: 0x2271, + 0x22720338: 0x2274, + 0x22730338: 0x2275, + 0x22760338: 0x2278, + 0x22770338: 0x2279, + 0x227A0338: 0x2280, + 0x227B0338: 0x2281, + 0x22820338: 0x2284, + 0x22830338: 0x2285, + 0x22860338: 0x2288, + 0x22870338: 0x2289, + 0x22A20338: 0x22AC, + 0x22A80338: 0x22AD, + 0x22A90338: 0x22AE, + 0x22AB0338: 0x22AF, + 0x227C0338: 0x22E0, + 0x227D0338: 0x22E1, + 0x22910338: 0x22E2, + 0x22920338: 0x22E3, + 0x22B20338: 0x22EA, + 0x22B30338: 0x22EB, + 0x22B40338: 0x22EC, + 0x22B50338: 0x22ED, + 0x304B3099: 0x304C, + 0x304D3099: 0x304E, + 0x304F3099: 0x3050, + 0x30513099: 0x3052, + 0x30533099: 0x3054, + 0x30553099: 0x3056, + 0x30573099: 0x3058, + 0x30593099: 0x305A, + 0x305B3099: 0x305C, + 0x305D3099: 0x305E, + 0x305F3099: 0x3060, + 0x30613099: 0x3062, + 0x30643099: 0x3065, + 0x30663099: 0x3067, + 0x30683099: 0x3069, + 0x306F3099: 0x3070, + 0x306F309A: 0x3071, + 0x30723099: 0x3073, + 0x3072309A: 0x3074, + 0x30753099: 0x3076, + 0x3075309A: 0x3077, + 0x30783099: 0x3079, + 0x3078309A: 0x307A, + 0x307B3099: 0x307C, + 0x307B309A: 0x307D, + 0x30463099: 0x3094, + 0x309D3099: 0x309E, + 0x30AB3099: 0x30AC, + 0x30AD3099: 0x30AE, + 0x30AF3099: 0x30B0, + 0x30B13099: 0x30B2, + 0x30B33099: 0x30B4, + 0x30B53099: 0x30B6, + 0x30B73099: 0x30B8, + 0x30B93099: 0x30BA, + 0x30BB3099: 0x30BC, + 0x30BD3099: 0x30BE, + 0x30BF3099: 0x30C0, + 0x30C13099: 0x30C2, + 0x30C43099: 0x30C5, + 0x30C63099: 0x30C7, + 0x30C83099: 0x30C9, + 0x30CF3099: 0x30D0, + 0x30CF309A: 0x30D1, + 0x30D23099: 0x30D3, + 0x30D2309A: 0x30D4, + 0x30D53099: 0x30D6, + 0x30D5309A: 0x30D7, + 0x30D83099: 0x30D9, + 0x30D8309A: 0x30DA, + 0x30DB3099: 0x30DC, + 0x30DB309A: 0x30DD, + 0x30A63099: 0x30F4, + 0x30EF3099: 0x30F7, + 0x30F03099: 0x30F8, + 0x30F13099: 0x30F9, + 0x30F23099: 0x30FA, + 0x30FD3099: 0x30FE, + 0x109910BA: 0x1109A, + 0x109B10BA: 0x1109C, + 0x10A510BA: 0x110AB, + 0x11311127: 0x1112E, + 0x11321127: 0x1112F, + 0x1347133E: 0x1134B, + 0x13471357: 0x1134C, + 0x14B914BA: 0x114BB, + 0x14B914B0: 0x114BC, + 0x14B914BD: 0x114BE, + 0x15B815AF: 0x115BA, + 0x15B915AF: 0x115BB, +} + +// Total size of tables: 53KB (54226 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/transform.go b/vendor/golang.org/x/text/unicode/norm/transform.go new file mode 100644 index 00000000000..9f47efbaf65 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/transform.go @@ -0,0 +1,88 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Reset implements the Reset method of the transform.Transformer interface. +func (Form) Reset() {} + +// Transform implements the Transform method of the transform.Transformer +// interface. It may need to write segments of up to MaxSegmentSize at once. +// Users should either catch ErrShortDst and allow dst to grow or have dst be at +// least of size MaxTransformChunkSize to be guaranteed of progress. +func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := 0 + // Cap the maximum number of src bytes to check. 
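// (Editor's note, not upstream text: quickSpan below reports how much of the
// input is already in normal form f; that prefix is copied to dst verbatim,
// and only the remainder, if any, is routed through the slower
// reorder-buffer path in f.transform.)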
+ b := src + eof := atEOF + if ns := len(dst); ns < len(b) { + err = transform.ErrShortDst + eof = false + b = b[:ns] + } + i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof) + n += copy(dst[n:], b[n:i]) + if !ok { + nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF) + return nDst + n, nSrc + n, err + } + if n < len(src) && !atEOF { + err = transform.ErrShortSrc + } + return n, n, err +} + +func flushTransform(rb *reorderBuffer) bool { + // Write out (must fully fit in dst, or else it is an ErrShortDst). + if len(rb.out) < rb.nrune*utf8.UTFMax { + return false + } + rb.out = rb.out[rb.flushCopy(rb.out):] + return true +} + +var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc} + +// transform implements the transform.Transformer interface. It is only called +// when quickSpan does not pass for a given string. +func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // TODO: get rid of reorderBuffer. See CL 23460044. + rb := reorderBuffer{} + rb.init(f, src) + for { + // Load segment into reorder buffer. + rb.setFlusher(dst[nDst:], flushTransform) + end := decomposeSegment(&rb, nSrc, atEOF) + if end < 0 { + return nDst, nSrc, errs[-end] + } + nDst = len(dst) - len(rb.out) + nSrc = end + + // Next quickSpan. + end = rb.nsrc + eof := atEOF + if n := nSrc + len(dst) - nDst; n < end { + err = transform.ErrShortDst + end = n + eof = false + } + end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof) + n := copy(dst[nDst:], rb.src.bytes[nSrc:end]) + nSrc += n + nDst += n + if ok { + if n < rb.nsrc && !atEOF { + err = transform.ErrShortSrc + } + return nDst, nSrc, err + } + } +} diff --git a/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/golang.org/x/text/unicode/norm/trie.go new file mode 100644 index 00000000000..423386bf436 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/trie.go @@ -0,0 +1,54 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package norm + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var nfcSparse = sparseBlocks{ + values: nfcSparseValues[:], + offset: nfcSparseOffset[:], +} + +var nfkcSparse = sparseBlocks{ + values: nfkcSparseValues[:], + offset: nfkcSparseOffset[:], +} + +var ( + nfcData = newNfcTrie(0) + nfkcData = newNfkcTrie(0) +) + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is given by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE new file mode 100644 index 00000000000..263aa7a0c12 --- /dev/null +++ b/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/google.golang.org/api/gensupport/backoff.go new file mode 100644 index 00000000000..1356140472a --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/backoff.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "math/rand" + "time" +) + +type BackoffStrategy interface { + // Pause returns the duration of the next pause and true if the operation should be + // retried, or false if no further retries should be attempted. + Pause() (time.Duration, bool) + + // Reset restores the strategy to its initial state. + Reset() +} + +// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. +// The initial pause time is given by Base. +// Once the total pause time exceeds Max, Pause will indicate no further retries. +type ExponentialBackoff struct { + Base time.Duration + Max time.Duration + total time.Duration + n uint +} + +func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { + if eb.total > eb.Max { + return 0, false + } + + // The next pause is selected randomly from [0, 2^n * Base). + d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) + eb.total += d + eb.n++ + return d, true +} + +func (eb *ExponentialBackoff) Reset() { + eb.n = 0 + eb.total = 0 +} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go new file mode 100644 index 00000000000..99210491153 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -0,0 +1,77 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
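// A minimal usage sketch (editor's illustration, not part of the vendored
// file). It relies only on the Chunk/Next protocol defined below: Chunk
// returns the same chunk until Next advances past it, and the final chunk
// is reported together with io.EOF.
//
//	mb := gensupport.NewMediaBuffer(strings.NewReader("hello world"), 4)
//	for {
//		chunk, off, size, err := mb.Chunk()
//		if size > 0 {
//			data, _ := ioutil.ReadAll(chunk)
//			fmt.Printf("chunk at offset %d: %q\n", off, data) // "hell", "o wo", "rld"
//		}
//		if err != nil {
//			break // io.EOF after the last chunk; anything else is a read error
//		}
//		mb.Next()
//	}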
+type MediaBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { + return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if mb.err == nil && len(mb.chunk) == 0 { + mb.err = mb.loadChunk() + } + return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (mb *MediaBuffer) loadChunk() error { + bufSize := cap(mb.chunk) + mb.chunk = mb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = mb.media.Read(mb.chunk[read:]) + read += n + } + mb.chunk = mb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (mb *MediaBuffer) Next() { + mb.off += int64(len(mb.chunk)) + mb.chunk = mb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/gensupport/doc.go new file mode 100644 index 00000000000..752c4b411b2 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/vendor/google.golang.org/api/gensupport/header.go b/vendor/google.golang.org/api/gensupport/header.go new file mode 100644 index 00000000000..cb5e67c77a2 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/header.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "runtime" + "strings" +) + +// GoogleClientHeader returns the value to use for the x-goog-api-client +// header, which is used internally by Google. 
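// For illustration (editor's note; both argument values below are
// hypothetical), a call such as
//
//	gensupport.GoogleClientHeader("20170210", "gccl/1.0")
//
// yields a value of the form
//
//	gl-go/go1.9.1 gccl/1.0 gdcl/20170210
//
// where the gl-go element is whatever runtime.Version() reports.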
+func GoogleClientHeader(generatorVersion, clientElement string) string { + elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)} + if clientElement != "" { + elts = append(elts, clientElement) + } + elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion)) + return strings.Join(elts, " ") +} diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/gensupport/json.go new file mode 100644 index 00000000000..c01e32189f4 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/json.go @@ -0,0 +1,211 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. +// A field is selected if any of the following is true: +// * it has a non-empty value +// * its field name is present in forceSendFields and it is not a nil pointer or nil interface +// * its field name is present in nullFields. +// The JSON key for each selected field is taken from the field's json: struct tag. +func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { + if len(forceSendFields) == 0 && len(nullFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]bool) + for _, f := range forceSendFields { + mustInclude[f] = true + } + useNull := make(map[string]bool) + useNullMaps := make(map[string]map[string]bool) + for _, nf := range nullFields { + parts := strings.SplitN(nf, ".", 2) + field := parts[0] + if len(parts) == 1 { + useNull[field] = true + } else { + if useNullMaps[field] == nil { + useNullMaps[field] = map[string]bool{} + } + useNullMaps[field][parts[1]] = true + } + } + + dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + + if useNull[f.Name] { + if !isEmptyValue(v) { + return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) + } + m[tag.apiName] = nil + continue + } + + if !includeField(v, f, mustInclude) { + continue + } + + // If map fields are explicitly set to null, use a map[string]interface{}. + if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { + ms, ok := v.Interface().(map[string]string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + } + mi := map[string]interface{}{} + for k, v := range ms { + mi[k] = v + } + for k := range useNullMaps[f.Name] { + mi[k] = nil + } + m[tag.apiName] = mi + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. 
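// (Editor's note: a selected nil slice must still serialize as "[]" rather
// than "null", because a JSON null reads as a field deletion under PATCH
// semantics; the []bool{} literal below is arbitrary, since every empty
// slice marshals to "[]" regardless of element type.)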
+ if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. +func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + return mustInclude[f.Name] || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. 
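// As a worked example (editor's sketch; the Bucket struct is hypothetical
// and not part of this package): isEmptyValue treats a zero int64 as empty,
// which is exactly why the field below is emitted, as an explicit null,
// only because it appears in NullFields.
//
//	type Bucket struct {
//		Name            string   `json:"name,omitempty"`
//		Versions        int64    `json:"versions,omitempty"`
//		ForceSendFields []string `json:"-"`
//		NullFields      []string `json:"-"`
//	}
//
//	b := Bucket{Name: "demo", NullFields: []string{"Versions"}}
//	data, err := gensupport.MarshalJSON(b, b.ForceSendFields, b.NullFields)
//	// data == `{"name":"demo","versions":null}`, err == nil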
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/gensupport/jsonfloat.go new file mode 100644 index 00000000000..cb02335d22d --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/jsonfloat.go @@ -0,0 +1,57 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gensupport + +import ( + "encoding/json" + "errors" + "fmt" + "math" +) + +// JSONFloat64 is a float64 that supports proper unmarshaling of special float +// values in JSON, according to +// https://developers.google.com/protocol-buffers/docs/proto3#json. Although +// that is a proto-to-JSON spec, it applies to all Google APIs. +// +// The jsonpb package +// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has +// similar functionality, but only for direct translation from proto messages +// to JSON. +type JSONFloat64 float64 + +func (f *JSONFloat64) UnmarshalJSON(data []byte) error { + var ff float64 + if err := json.Unmarshal(data, &ff); err == nil { + *f = JSONFloat64(ff) + return nil + } + var s string + if err := json.Unmarshal(data, &s); err == nil { + switch s { + case "NaN": + ff = math.NaN() + case "Infinity": + ff = math.Inf(1) + case "-Infinity": + ff = math.Inf(-1) + default: + return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) + } + *f = JSONFloat64(ff) + return nil + } + return errors.New("google.golang.org/api/internal: data not float or string") +} diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go new file mode 100644 index 00000000000..f3e77fc5297 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -0,0 +1,299 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/textproto" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. 
+ err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// If the content type is already known, it can be specified via ctype. +// Otherwise, the content of media will be sniffed to determine the content type. +// If media implements googleapi.ContentTyper (deprecated), this will be used +// instead of sniffing the content. +// After calling DetermineContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { + // Note: callers could avoid calling DetermineContentType if ctype != "", + // but doing the check inside this function reduces the amount of + // generated code. + if ctype != "" { + return media, ctype + } + + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} + +type typeReader struct { + io.Reader + typ string +} + +// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body. +// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
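A minimal sketch of DetermineContentType in use; the HTML snippet is arbitrary, and the sniffed type is whatever http.DetectContentType reports for it. The key contract is that after the call, all reads must go through the returned Reader:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"google.golang.org/api/gensupport"
)

func main() {
	media := bytes.NewReader([]byte("<html><body>hi</body></html>"))

	// An empty ctype forces sniffing; the original reader must not be reused,
	// because up to 512 bytes may already have been consumed from it.
	r, ctype := gensupport.DetermineContentType(media, "")
	fmt.Println("sniffed:", ctype) // typically text/html; charset=utf-8

	// All of the media, including the sniffed prefix, is still readable from r.
	b, _ := ioutil.ReadAll(r)
	fmt.Println(len(b), "bytes")
}
```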
+type multipartReader struct { + pr *io.PipeReader + pipeOpen bool + ctype string +} + +func newMultipartReader(parts []typeReader) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + if !mp.pipeOpen { + return nil + } + mp.pipeOpen = false + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. +func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +} + +// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into. +// +// If chunkSize is zero, media is returned as the first value, and the other +// two return values are nil, true. +// +// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the +// contents of media fit in a single chunk. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { + if chunkSize == 0 { // do not chunk + return media, nil, true + } + mb = NewMediaBuffer(media, chunkSize) + _, _, _, err := mb.Chunk() + // If err is io.EOF, we can upload this in a single request. Otherwise, err is + // either nil or a non-EOF error. If it is the latter, then the next call to + // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this + // error will be handled at some point. + return nil, mb, err == io.EOF +} + +// MediaInfo holds information for media uploads. It is intended for use by generated +// code only. +type MediaInfo struct { + // At most one of Media and MediaBuffer will be set. + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater +} + +// NewInfoFromMedia should be invoked from the Media method of a call. It returns a +// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer +// if needed. 
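CombineBodyMedia is exported and can be exercised directly; a small sketch with arbitrary reader contents:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"google.golang.org/api/gensupport"
)

func main() {
	body := strings.NewReader(`{"name": "example-object"}`)
	media := strings.NewReader("raw media bytes")

	rc, ctype := gensupport.CombineBodyMedia(body, "application/json", media, "text/plain")
	defer rc.Close() // required if reads are abandoned before EOF

	b, _ := ioutil.ReadAll(rc)
	fmt.Println(ctype) // multipart/related; boundary=<random>
	fmt.Println(len(b), "bytes of multipart body")
}
```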
+func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { + mi := &MediaInfo{} + opts := googleapi.ProcessMediaOptions(options) + if !opts.ForceEmptyContentType { + r, mi.mType = DetermineContentType(r, opts.ContentType) + } + mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) + return mi +} + +// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a +// call. It returns a MediaInfo using the given reader, size and media type. +func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { + rdr := ReaderAtToReader(r, size) + rdr, mType := DetermineContentType(rdr, mediaType) + return &MediaInfo{ + size: size, + mType: mType, + buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), + media: nil, + singleChunk: false, + } +} + +func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { + if mi != nil { + mi.progressUpdater = pu + } +} + +// UploadType determines the type of upload: a single request, or a resumable +// series of requests. +func (mi *MediaInfo) UploadType() string { + if mi.singleChunk { + return "multipart" + } + return "resumable" +} + +// UploadRequest sets up an HTTP request for media upload. It adds headers +// as necessary, and returns a replacement for the body. +func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, cleanup func()) { + cleanup = func() {} + if mi == nil { + return body, cleanup + } + var media io.Reader + if mi.media != nil { + // This only happens when the caller has turned off chunking. In that + // case, we write all of media in a single non-retryable request. + media = mi.media + } else if mi.singleChunk { + // The data fits in a single chunk, which has now been read into the MediaBuffer. + // We obtain that chunk so we can write it in a single request. The request can + // be retried because the data is stored in the MediaBuffer. + media, _, _, _ = mi.buffer.Chunk() + } + if media != nil { + combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + cleanup = func() { combined.Close() } + reqHeaders.Set("Content-Type", ctype) + body = combined + } + if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + reqHeaders.Set("X-Upload-Content-Type", mi.mType) + } + return body, cleanup +} + +// ResumableUpload returns an appropriately configured ResumableUpload value if the +// upload is resumable, or nil otherwise. +func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { + if mi == nil || mi.singleChunk { + return nil + } + return &ResumableUpload{ + URI: locURI, + Media: mi.buffer, + MediaType: mi.mType, + Callback: func(curr int64) { + if mi.progressUpdater != nil { + mi.progressUpdater(curr, mi.size) + } + }, + } +} diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/gensupport/params.go new file mode 100644 index 00000000000..3b3c743967e --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/params.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Get returns the first value for the given key, or "". 
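How these pieces combine: a sketch that builds a MediaInfo for a payload smaller than one chunk, which therefore goes out as a single multipart request. The reader contents and chunk size are arbitrary, and MediaBuffer's internals live in a file not shown in this diff:

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/api/gensupport"
	"google.golang.org/api/googleapi"
)

func main() {
	small := strings.NewReader("tiny payload")
	mi := gensupport.NewInfoFromMedia(small, []googleapi.MediaOption{
		googleapi.ChunkSize(googleapi.MinUploadChunkSize),
	})
	// "tiny payload" fits in one 256K chunk, so a single request is used.
	fmt.Println(mi.UploadType()) // multipart
}
```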
+func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go new file mode 100644 index 00000000000..dcd591f7ff6 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -0,0 +1,217 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + "fmt" + "io" + "net/http" + "sync" + "time" + + "golang.org/x/net/context" +) + +const ( + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *MediaBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) + + // If not specified, a default exponential backoff strategy will be used. + Backoff BackoffStrategy +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. +// final specifies whether data is the final chunk to be uploaded. 
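URLParams behaves like a write-oriented url.Values; a brief sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

func main() {
	u := gensupport.URLParams{}
	u.Set("alt", "json")
	u.SetMulti("fields", []string{"nextPageToken", "items/id"})

	// Encode sorts by key, exactly like url.Values.Encode.
	fmt.Println(u.Encode()) // alt=json&fields=nextPageToken&fields=items%2Fid
}
```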
+func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in + // semantics, Google added this new request header which + // causes it to not use "308" and instead reply with 200 OK + // and sets the upload-specific "X-HTTP-Status-Code-Override: + // 308" response header. + req.Header.Set("X-GUploader-No-308", "yes") + + return SendRequest(ctx, rx.Client, req) +} + +func statusResumeIncomplete(resp *http.Response) bool { + // This is how the server signals "status resume incomplete" + // when X-GUploader-No-308 is set to "yes": + return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. +func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + // We sent "X-GUploader-No-308: yes" (see comment elsewhere in + // this file), so we don't expect to get a 308. + if res.StatusCode == 308 { + return nil, errors.New("unexpected 308 response status code") + } + + if res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if statusResumeIncomplete(res) { + rx.Media.Next() + } + return res, nil +} + +func contextDone(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// Before sending an HTTP request, Upload calls any registered hook functions, +// and calls the returned functions after the request returns (see send.go). +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + var pause time.Duration + backoff := rx.Backoff + if backoff == nil { + backoff = DefaultBackoffStrategy() + } + + for { + // Ensure that we return in the case of cancelled context, even if pause is 0. 
+ if contextDone(ctx) { + return nil, ctx.Err() + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(pause): + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if shouldRetry(status, err) { + var retry bool + pause, retry = backoff.Pause() + if retry { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + continue + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if statusResumeIncomplete(resp) { + pause = 0 + backoff.Reset() + resp.Body.Close() + continue + } + + // It's possible for err and resp to both be non-nil here, but we expose a simpler + // contract to our callers: exactly one of resp and err will be non-nil. This means + // that any response body must be closed here before returning a non-nil error. + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + + return resp, nil + } +} diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go new file mode 100644 index 00000000000..c60b3c394b3 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/retry.go @@ -0,0 +1,85 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gensupport + +import ( + "io" + "net" + "net/http" + "time" + + "golang.org/x/net/context" +) + +// Retry invokes the given function, retrying it multiple times if the connection failed or +// the HTTP status response indicates the request should be attempted again. ctx may be nil. +func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { + for { + resp, err := f() + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Return if we shouldn't retry. + pause, retry := backoff.Pause() + if !shouldRetry(status, err) || !retry { + return resp, err + } + + // Ensure the response body is closed, if any. + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + + // Pause, but still listen to ctx.Done if context is not nil. + var done <-chan struct{} + if ctx != nil { + done = ctx.Done() + } + select { + case <-done: + return nil, ctx.Err() + case <-time.After(pause): + } + } +} + +// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. +func DefaultBackoffStrategy() BackoffStrategy { + return &ExponentialBackoff{ + Base: 250 * time.Millisecond, + Max: 16 * time.Second, + } +} + +// shouldRetry returns true if the HTTP response / error indicates that the +// request should be attempted again. 
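A hedged sketch of wiring Retry to an arbitrary request function with the default exponential backoff; the URL is hypothetical:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	ctx := context.Background()

	// Retries on 5xx, 429, io.ErrUnexpectedEOF, and temporary net errors,
	// pausing per the backoff strategy between attempts.
	resp, err := gensupport.Retry(ctx, func() (*http.Response, error) {
		return http.Get("https://www.googleapis.com/storage/v1/b/example-bucket") // hypothetical call
	}, gensupport.DefaultBackoffStrategy())
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```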
+func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + if err, ok := err.(net.Error); ok { + return err.Temporary() + } + return false +} diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go new file mode 100644 index 00000000000..092044f448c --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -0,0 +1,61 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// Hook is the type of a function that is called once before each HTTP request +// that is sent by a generated API. It returns a function that is called after +// the request returns. +// Hooks are not called if the context is nil. +type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) + +var hooks []Hook + +// RegisterHook registers a Hook to be called before each HTTP request by a +// generated API. Hooks are called in the order they are registered. Each +// hook can return a function; if it is non-nil, it is called after the HTTP +// request returns. These functions are called in the reverse order. +// RegisterHook should not be called concurrently with itself or SendRequest. +func RegisterHook(h Hook) { + hooks = append(hooks, h) +} + +// SendRequest sends a single HTTP request using the given client. +// If ctx is non-nil, it calls all hooks, then sends the request with +// ctxhttp.Do, then calls any functions returned by the hooks in reverse order. +func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request. + resp, err := ctxhttp.Do(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 00000000000..f6e15be35da --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,406 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. 
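One plausible use of the hook mechanism is request logging; a sketch (the logging itself is illustrative, not part of this package, and hooks only fire when the call carries a non-nil context):

```go
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	// Log every request a generated API sends, with its round-trip time.
	gensupport.RegisterHook(func(ctx context.Context, req *http.Request) func(*http.Response) {
		start := time.Now()
		return func(resp *http.Response) {
			code := 0
			if resp != nil {
				code = resp.StatusCode
			}
			log.Printf("%s %s -> %d (%v)", req.Method, req.URL, code, time.Since(start))
		}
	})

	req, _ := http.NewRequest("GET", "https://www.googleapis.com/discovery/v1/apis", nil)
	resp, err := gensupport.SendRequest(context.Background(), http.DefaultClient, req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```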
+package googleapi // import "google.golang.org/api/googleapi" + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "google.golang.org/api/googleapi/internal/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. +type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. + // When using a resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + Version = "0.5" + + // UserAgent is the header string used to identify this package. + UserAgent = "google-api-go-client/" + Version + + // The default chunk size to use for resumable uploads if not specified by the user. + DefaultUploadChunkSize = 8 * 1024 * 1024 + + // The minimum chunk size that can be used for resumable uploads. All + // user-specified chunk sizes must be multiple of this value. + MinUploadChunkSize = 256 * 1024 +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. + Header http.Header + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. + Message string `json:"message"` +} + +func (e *Error) Error() string { + if len(e.Errors) == 0 && e.Message == "" { + return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) + if e.Message != "" { + fmt.Fprintf(&buf, "%s", e.Message) + } + if len(e.Errors) == 0 { + return strings.TrimSpace(buf.String()) + } + if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { + fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) + return buf.String() + } + fmt.Fprintln(&buf, "\nMore details:") + for _, v := range e.Errors { + fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) + } + return buf.String() +} + +type errorReply struct { + Error *Error `json:"error"` +} + +// CheckResponse returns an error (of type *Error) if the response +// status code is not 2xx. 
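Callers typically type-assert returned errors to *Error to reach the status code and typed reasons; a sketch of that pattern (the handling branches are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi"
)

// handle shows the usual shape of error handling around a generated call.
func handle(err error) {
	if err == nil {
		return
	}
	if googleapi.IsNotModified(err) {
		fmt.Println("cached copy is still fresh")
		return
	}
	if ae, ok := err.(*googleapi.Error); ok {
		fmt.Println("HTTP", ae.Code, ae.Message)
		for _, item := range ae.Errors {
			fmt.Println("  reason:", item.Reason)
		}
		if ae.Code == http.StatusNotFound {
			fmt.Println("not found; safe to create")
		}
		return
	}
	fmt.Println("transport error:", err)
}

func main() {
	handle(&googleapi.Error{Code: 404, Message: "Not Found"})
}
```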
+func CheckResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, err := ioutil.ReadAll(res.Body) + if err == nil { + jerr := new(errorReply) + err = json.Unmarshal(slurp, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(slurp) + return jerr.Error + } + } + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, + } +} + +// IsNotModified reports whether err is the result of the +// server replying with http.StatusNotModified. +// Such error values are sometimes returned by "Do" methods +// on calls when If-None-Match is used. +func IsNotModified(err error) bool { + if err == nil { + return false + } + ae, ok := err.(*Error) + return ok && ae.Code == http.StatusNotModified +} + +// CheckMediaResponse returns an error (of type *Error) if the response +// status code is not 2xx. Unlike CheckResponse it does not assume the +// body is a JSON error document. +// It is the caller's responsibility to close res.Body. +func CheckMediaResponse(res *http.Response) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + return &Error{ + Code: res.StatusCode, + Body: string(slurp), + } +} + +type MarshalStyle bool + +var WithDataWrapper = MarshalStyle(true) +var WithoutDataWrapper = MarshalStyle(false) + +func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf := new(bytes.Buffer) + if wrap { + buf.Write([]byte(`{"data": `)) + } + err := json.NewEncoder(buf).Encode(v) + if err != nil { + return nil, err + } + if wrap { + buf.Write([]byte(`}`)) + } + return buf, nil +} + +// endingWithErrorReader reads from r until it returns an error. If the +// final error from r is io.EOF and e is non-nil, e is used instead. +type endingWithErrorReader struct { + r io.Reader + e error +} + +func (er endingWithErrorReader) Read(p []byte) (n int, err error) { + n, err = er.r.Read(p) + if err == io.EOF && er.e != nil { + err = er.e + } + return +} + +// countingWriter counts the number of bytes it receives to write, but +// discards them. +type countingWriter struct { + n *int64 +} + +func (w countingWriter) Write(p []byte) (int, error) { + *w.n += int64(len(p)) + return len(p), nil +} + +// ProgressUpdater is a function that is called upon every progress update of a resumable upload. +// This is the only part of a resumable upload (from googleapi) that is usable by the developer. +// The remaining usable pieces of resumable uploads are exposed in each auto-generated API. +type ProgressUpdater func(current, total int64) + +type MediaOption interface { + setOptions(o *MediaOptions) +} + +type contentTypeOption string + +func (ct contentTypeOption) setOptions(o *MediaOptions) { + o.ContentType = string(ct) + if o.ContentType == "" { + o.ForceEmptyContentType = true + } +} + +// ContentType returns a MediaOption which sets the Content-Type header for media uploads. +// If ctype is empty, the Content-Type header will be omitted. +func ContentType(ctype string) MediaOption { + return contentTypeOption(ctype) +} + +type chunkSizeOption int + +func (cs chunkSizeOption) setOptions(o *MediaOptions) { + size := int(cs) + if size%MinUploadChunkSize != 0 { + size += MinUploadChunkSize - (size % MinUploadChunkSize) + } + o.ChunkSize = size +} + +// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
+// size will be rounded up to the nearest multiple of 256K. +// Media which contains fewer than size bytes will be uploaded in a single request. +// Media which contains size bytes or more will be uploaded in separate chunks. +// If size is zero, media will be uploaded in a single request. +func ChunkSize(size int) MediaOption { + return chunkSizeOption(size) +} + +// MediaOptions stores options for customizing media upload. It is not used by developers directly. +type MediaOptions struct { + ContentType string + ForceEmptyContentType bool + + ChunkSize int +} + +// ProcessMediaOptions stores options from opts in a MediaOptions. +// It is not used by developers directly. +func ProcessMediaOptions(opts []MediaOption) *MediaOptions { + mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} + for _, o := range opts { + o.setOptions(mo) + } + return mo +} + +func ResolveRelative(basestr, relstr string) string { + u, _ := url.Parse(basestr) + rel, _ := url.Parse(relstr) + u = u.ResolveReference(rel) + us := u.String() + us = strings.Replace(us, "%7B", "{", -1) + us = strings.Replace(us, "%7D", "}", -1) + return us +} + +// Expand substitutes any {encoded} strings in the URL passed in using +// the map supplied. +// +// It stores the escaped expansion in u.RawPath and the unescaped one in +// u.Path, to avoid re-encoding of the parameters in the URL path. +func Expand(u *url.URL, expansions map[string]string) { + escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) + if err == nil { + u.Path = unescaped + u.RawPath = escaped + } +} + +// CloseBody is used to close res.Body. +// Prior to calling Close, it also tries to Read a small amount to see an EOF. +// Not seeing an EOF can prevent HTTP Transports from reusing connections. +func CloseBody(res *http.Response) { + if res == nil || res.Body == nil { + return + } + // Justification for 3 byte reads: two for up to "\r\n" after + // a JSON/XML document, and then 1 to see EOF if we haven't yet. + // TODO(bradfitz): detect Go 1.3+ and skip these reads. + // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. +// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. +func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request.
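The rounding behaviour of ChunkSize is easy to verify through the exported ProcessMediaOptions:

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// 300K is not a multiple of 256K, so it is rounded up to 512K.
	mo := googleapi.ProcessMediaOptions([]googleapi.MediaOption{
		googleapi.ChunkSize(300 * 1024),
	})
	fmt.Println(mo.ChunkSize, mo.ChunkSize%googleapi.MinUploadChunkSize) // 524288 0
}
```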
+// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// More information about field formatting can be found here: +// https://developers.google.com/+/api/#fields-syntax +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. +func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// Trace returns a CallOption that enables diagnostic tracing for a call. +// traceToken is an ID supplied by Google support. +func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + +// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 00000000000..de9c88cb65c --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
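A brief sketch of the Field and CallOption helpers together (the parameter values are arbitrary):

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	fields := googleapi.CombineFields([]googleapi.Field{"nextPageToken", "items(id,updated)"})
	fmt.Println(fields) // nextPageToken,items(id,updated)

	// CallOptions reduce to key/value pairs that land in the query string.
	for _, opt := range []googleapi.CallOption{
		googleapi.QuotaUser("per-user-quota-key"), // hypothetical value
		googleapi.UserIP("203.0.113.7"),
	} {
		k, v := opt.Get()
		fmt.Printf("%s=%s\n", k, v) // quotaUser=..., userIp=...
	}
}
```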
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go new file mode 100644 index 00000000000..63bf0538301 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -0,0 +1,248 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. +package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +// pairWriter is a convenience struct which allows escaped and unescaped +// versions of the template to be written in parallel. +type pairWriter struct { + escaped, unescaped bytes.Buffer +} + +// Write writes the provided string directly without any escaping. +func (w *pairWriter) Write(s string) { + w.escaped.WriteString(s) + w.unescaped.WriteString(s) +} + +// Escape writes the provided string, escaping the string for the +// escaped output. +func (w *pairWriter) Escape(s string, allowReserved bool) { + w.unescaped.WriteString(s) + if allowReserved { + w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } +} + +// Escaped returns the escaped string. +func (w *pairWriter) Escaped() string { + return w.escaped.String() +} + +// Unescaped returns the unescaped string. +func (w *pairWriter) Unescaped() string { + return w.unescaped.String() +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. 
+func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifiers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce the +// resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped.
+func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { + var w pairWriter + for _, p := range t.parts { + p.expand(&w, values) + } + return w.Escaped(), w.Unescaped() +} + +func (tp *templatePart) expand(w *pairWriter, values map[string]string) { + if len(tp.raw) > 0 { + w.Write(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + w.Write(tp.first) + first = false + } else { + w.Write(tp.sep) + } + tp.expandString(w, term, value) + } +} + +func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { + if tp.named { + w.Write(name) + if empty { + w.Write(tp.ifemp) + } else { + w.Write("=") + } + } +} + +func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(w, t.name, len(s) == 0) + w.Escape(s, tp.allowReserved) +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go new file mode 100644 index 00000000000..2e70b81543d --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uritemplates + +// Expand parses then expands a URI template with a set of values to produce +// the resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. +func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { + template, err := parse(path) + if err != nil { + return "", "", err + } + escaped, unescaped = template.Expand(values) + return escaped, unescaped, nil +} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 00000000000..c8fdd541611 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,202 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "errors" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. 
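uritemplates is an internal package, so external code reaches it through googleapi.Expand; a sketch of that entry point (bucket and object names are arbitrary):

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/api/googleapi"
)

func main() {
	u, _ := url.Parse("https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}")
	googleapi.Expand(u, map[string]string{
		"bucket": "my-bucket",
		"object": "path/to a file.txt",
	})
	// Reserved characters in the values are percent-encoded in the
	// escaped (RawPath) form that String() prints, e.g.:
	// https://www.googleapis.com/storage/v1/b/my-bucket/o/path%2Fto%20a%20file.txt
	fmt.Println(u.String())
}
```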
+type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. +type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (s Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, s[i], 10) + }) +} + +func (s Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(s[i]), 10) + }) +} + +func (s Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, s[i], 10) + }) +} + +func (s Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(s[i]), 10) + }) +} + +func (s Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(s), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, s[i], 'g', -1, 64) + }) +} + +// RawMessage is a raw encoded JSON value. +// It is identical to json.RawMessage, except it does not suffer from +// https://golang.org/issue/14493. +type RawMessage []byte + +// MarshalJSON returns m. +func (m RawMessage) MarshalJSON() ([]byte, error) { + return m, nil +} + +// UnmarshalJSON sets *m to a copy of data. +func (m *RawMessage) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") + } + *m = append((*m)[:0], data...) + return nil +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. 
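A short illustration of the quoted-string encoding, using Int64s (the values are arbitrary; the large one is chosen to exceed float64's integer precision, which is exactly why these types quote their elements):

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	ids := googleapi.Int64s{1, 9007199254740993} // 2^53+1 would lose precision as a JSON number
	b, _ := json.Marshal(ids)
	fmt.Println(string(b)) // ["1","9007199254740993"]

	var back googleapi.Int64s
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back[1]) // 9007199254740993, preserved exactly
}
```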
+func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 00000000000..f49c93f7857 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,3711 @@ +{ + "kind": "discovery#restDescription", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/akxawO6Ey81E_n6KZZ_RFctOG6Q\"", + "discoveryVersion": "v1", + "id": "storage:v1", + "name": "storage", + "version": "v1", + "revision": "20171011", + "title": "Cloud Storage JSON API", + "description": "Stores and retrieves potentially large, immutable data objects.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "labels": [ + "labs" + ], + "protocol": "rest", + "baseUrl": "https://www.googleapis.com/storage/v1/", + "basePath": "/storage/v1/", + "rootUrl": "https://www.googleapis.com/", + "servicePath": "storage/v1/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "Bucket": { + "id": "Bucket", + "type": "object", + "description": "A bucket.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "annotations": { + "required": [ + "storage.buckets.update" + ] + } + }, + "billing": { + "type": "object", + "description": "The bucket's billing configuration.", + "properties": { + "requesterPays": { + "type": "boolean", + "description": "When set to true, bucket is requester pays." + } + } + }, + "cors": { + "type": "array", + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "type": "object", + "properties": { + "maxAgeSeconds": { + "type": "integer", + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32" + }, + "method": { + "type": "array", + "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + } + }, + "origin": { + "type": "array", + "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + } + }, + "responseHeader": { + "type": "array", + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + } + } + } + } + }, + "defaultObjectAcl": { + "type": "array", + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + } + }, + "encryption": { + "type": "object", + "description": "Encryption configuration used by default for newly inserted objects, when no encryption config is specified.", + "properties": { + "defaultKmsKeyName": { + "type": "string" + } + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the bucket." + }, + "id": { + "type": "string", + "description": "The ID of the bucket. For buckets, the id and name properties are the same." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "default": "storage#bucket" + }, + "labels": { + "type": "object", + "description": "User-provided labels, in key/value pairs.", + "additionalProperties": { + "type": "string", + "description": "An individual label entry." + } + }, + "lifecycle": { + "type": "object", + "description": "The bucket's lifecycle configuration.
See lifecycle management for more information.", + "properties": { + "rule": { + "type": "array", + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "type": "object", + "properties": { + "action": { + "type": "object", + "description": "The action to take.", + "properties": { + "storageClass": { + "type": "string", + "description": "Target storage class. Required iff the type of the action is SetStorageClass." + }, + "type": { + "type": "string", + "description": "Type of the action. Currently, only Delete and SetStorageClass are supported." + } + } + }, + "condition": { + "type": "object", + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "type": "integer", + "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", + "format": "int32" + }, + "createdBefore": { + "type": "string", + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date" + }, + "isLive": { + "type": "boolean", + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." + }, + "matchesStorageClass": { + "type": "array", + "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", + "items": { + "type": "string" + } + }, + "numNewerVersions": { + "type": "integer", + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32" + } + } + } + } + } + } + } + }, + "location": { + "type": "string", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." + }, + "logging": { + "type": "object", + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "type": "string", + "description": "The destination bucket where the current bucket's logs should be placed." + }, + "logObjectPrefix": { + "type": "string", + "description": "A prefix for log object names." + } + } + }, + "metageneration": { + "type": "string", + "description": "The metadata generation of this bucket.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of the bucket.", + "annotations": { + "required": [ + "storage.buckets.insert" + ] + } + }, + "owner": { + "type": "object", + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form project-owner-projectId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." 
+ } + } + }, + "projectNumber": { + "type": "string", + "description": "The project number of the project the bucket belongs to.", + "format": "uint64" + }, + "selfLink": { + "type": "string", + "description": "The URI of this bucket." + }, + "storageClass": { + "type": "string", + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes." + }, + "timeCreated": { + "type": "string", + "description": "The creation time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The modification time of the bucket in RFC 3339 format.", + "format": "date-time" + }, + "versioning": { + "type": "object", + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "type": "boolean", + "description": "While set to true, versioning is fully enabled for this bucket." + } + } + }, + "website": { + "type": "object", + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", + "properties": { + "mainPageSuffix": { + "type": "string", + "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages." + }, + "notFoundPage": { + "type": "string", + "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result." + } + } + } + } + }, + "BucketAccessControl": { + "id": "BucketAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. 
For bucket access control entries, this is always storage#bucketAccessControl.", + "default": "storage#bucketAccessControl" + }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity.", + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." + } + } + }, + "BucketAccessControls": { + "id": "BucketAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "default": "storage#bucketAccessControls" + } + } + }, + "Buckets": { + "id": "Buckets", + "type": "object", + "description": "A list of buckets.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Bucket" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", + "default": "storage#buckets" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + } + } + }, + "Channel": { + "id": "Channel", + "type": "object", + "description": "A notification channel used to watch for resource changes.", + "properties": { + "address": { + "type": "string", + "description": "The address where notifications are delivered for this channel." + }, + "expiration": { + "type": "string", + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "A UUID or similar unique string that identifies this channel." + }, + "kind": { + "type": "string", + "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", + "default": "api#channel" + }, + "params": { + "type": "object", + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "additionalProperties": { + "type": "string", + "description": "Declares a new parameter by name." + } + }, + "payload": { + "type": "boolean", + "description": "A Boolean value to indicate whether payload is wanted. Optional." + }, + "resourceId": { + "type": "string", + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." + }, + "resourceUri": { + "type": "string", + "description": "A version-specific identifier for the watched resource." + }, + "token": { + "type": "string", + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." + }, + "type": { + "type": "string", + "description": "The type of delivery mechanism used for this channel." 
+ } + } + }, + "ComposeRequest": { + "id": "ComposeRequest", + "type": "object", + "description": "A Compose request.", + "properties": { + "destination": { + "$ref": "Object", + "description": "Properties of the resulting object." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#composeRequest" + }, + "sourceObjects": { + "type": "array", + "description": "The list of source objects that will be concatenated into a single object.", + "items": { + "type": "object", + "properties": { + "generation": { + "type": "string", + "description": "The generation of this object to use as the source.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The source object's name. The source object's bucket is implicitly the destination bucket.", + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + }, + "objectPreconditions": { + "type": "object", + "description": "Conditions that must be met for this operation to execute.", + "properties": { + "ifGenerationMatch": { + "type": "string", + "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64" + } + } + } + } + }, + "annotations": { + "required": [ + "storage.objects.compose" + ] + } + } + } + }, + "Notification": { + "id": "Notification", + "type": "object", + "description": "A subscription to receive Google PubSub notifications.", + "properties": { + "custom_attributes": { + "type": "object", + "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.", + "additionalProperties": { + "type": "string" + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for this subscription notification." + }, + "event_types": { + "type": "array", + "description": "If present, only send notifications about listed event types. If empty, send notifications for all event types.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "The ID of the notification." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For notifications, this is always storage#notification.", + "default": "storage#notification" + }, + "object_name_prefix": { + "type": "string", + "description": "If present, only apply this notification configuration to object names that begin with this prefix." + }, + "payload_format": { + "type": "string", + "description": "The desired content of the Payload.", + "default": "JSON_API_V1" + }, + "selfLink": { + "type": "string", + "description": "The canonical URL of this notification." + }, + "topic": { + "type": "string", + "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'", + "annotations": { + "required": [ + "storage.notifications.insert" + ] + } + } + } + }, + "Notifications": { + "id": "Notifications", + "type": "object", + "description": "A list of notification subscriptions.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Notification" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. 
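A `ComposeRequest` lists the source objects to concatenate and the properties of the resulting object; the sources implicitly live in the destination bucket. A minimal sketch, again assuming `svc` from the auth sketch and placeholder names:

```go
// composeParts concatenates two uploaded parts into a single object.
func composeParts(svc *storage.Service) error {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-1"}, // bucket is implicitly the destination bucket
			{Name: "part-2"},
		},
	}
	_, err := svc.Objects.Compose("my-bucket", "combined", req).Do()
	return err
}
```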
For lists of notifications, this is always storage#notifications.", + "default": "storage#notifications" + } + } + }, + "Object": { + "id": "Object", + "type": "object", + "description": "An object.", + "properties": { + "acl": { + "type": "array", + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "annotations": { + "required": [ + "storage.objects.update" + ] + } + }, + "bucket": { + "type": "string", + "description": "The name of the bucket containing this object." + }, + "cacheControl": { + "type": "string", + "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600." + }, + "componentCount": { + "type": "integer", + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32" + }, + "contentDisposition": { + "type": "string", + "description": "Content-Disposition of the object data." + }, + "contentEncoding": { + "type": "string", + "description": "Content-Encoding of the object data." + }, + "contentLanguage": { + "type": "string", + "description": "Content-Language of the object data." + }, + "contentType": { + "type": "string", + "description": "Content-Type of the object data. If an object is stored without a Content-Type, it is served as application/octet-stream." + }, + "crc32c": { + "type": "string", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." + }, + "customerEncryption": { + "type": "object", + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "type": "string", + "description": "The encryption algorithm." + }, + "keySha256": { + "type": "string", + "description": "SHA256 hash value of the encryption key." + } + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the object." + }, + "generation": { + "type": "string", + "description": "The content generation of this object. Used for object versioning.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the object, including the bucket name, object name, and generation number." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For objects, this is always storage#object.", + "default": "storage#object" + }, + "kmsKeyName": { + "type": "string", + "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key." + }, + "md5Hash": { + "type": "string", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." + }, + "mediaLink": { + "type": "string", + "description": "Media download link." + }, + "metadata": { + "type": "object", + "description": "User-provided metadata, in key/value pairs.", + "additionalProperties": { + "type": "string", + "description": "An individual metadata entry." + } + }, + "metageneration": { + "type": "string", + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. 
A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64" + }, + "name": { + "type": "string", + "description": "The name of the object. Required if not specified by URL parameter." + }, + "owner": { + "type": "object", + "description": "The owner of the object. This will always be the uploader of the object.", + "properties": { + "entity": { + "type": "string", + "description": "The entity, in the form user-userId." + }, + "entityId": { + "type": "string", + "description": "The ID for the entity." + } + } + }, + "selfLink": { + "type": "string", + "description": "The link to this object." + }, + "size": { + "type": "string", + "description": "Content-Length of the data in bytes.", + "format": "uint64" + }, + "storageClass": { + "type": "string", + "description": "Storage class of the object." + }, + "timeCreated": { + "type": "string", + "description": "The creation time of the object in RFC 3339 format.", + "format": "date-time" + }, + "timeDeleted": { + "type": "string", + "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time" + }, + "timeStorageClassUpdated": { + "type": "string", + "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.", + "format": "date-time" + }, + "updated": { + "type": "string", + "description": "The modification time of the object metadata in RFC 3339 format.", + "format": "date-time" + } + } + }, + "ObjectAccessControl": { + "id": "ObjectAccessControl", + "type": "object", + "description": "An access-control entry.", + "properties": { + "bucket": { + "type": "string", + "description": "The name of the bucket." + }, + "domain": { + "type": "string", + "description": "The domain associated with the entity, if any." + }, + "email": { + "type": "string", + "description": "The email address associated with the entity, if any." + }, + "entity": { + "type": "string", + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + } + }, + "entityId": { + "type": "string", + "description": "The ID for the entity, if any." + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the access-control entry." + }, + "generation": { + "type": "string", + "description": "The content generation of the object, if applied to an object.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "The ID of the access-control entry." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "default": "storage#objectAccessControl" + }, + "object": { + "type": "string", + "description": "The name of the object, if applied to an object." 
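The `generation` and `metageneration` fields above exist for optimistic concurrency: read them back from the object, then pass them as `if...Match` preconditions on the next mutation. A sketch, assuming `svc` and placeholder names:

```go
// deleteIfUnchanged deletes an object only if nobody has overwritten it
// since we read its metadata; otherwise the service returns 412.
func deleteIfUnchanged(svc *storage.Service) error {
	obj, err := svc.Objects.Get("my-bucket", "notes.txt").Do()
	if err != nil {
		return err
	}
	return svc.Objects.Delete("my-bucket", "notes.txt").
		IfGenerationMatch(obj.Generation).Do()
}
```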
+ }, + "projectTeam": { + "type": "object", + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "type": "string", + "description": "The project number." + }, + "team": { + "type": "string", + "description": "The team." + } + } + }, + "role": { + "type": "string", + "description": "The access permission for the entity.", + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "The link to this access-control entry." + } + } + }, + "ObjectAccessControls": { + "id": "ObjectAccessControls", + "type": "object", + "description": "An access-control list.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "ObjectAccessControl" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", + "default": "storage#objectAccessControls" + } + } + }, + "Objects": { + "id": "Objects", + "type": "object", + "description": "A list of objects.", + "properties": { + "items": { + "type": "array", + "description": "The list of items.", + "items": { + "$ref": "Object" + } + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "default": "storage#objects" + }, + "nextPageToken": { + "type": "string", + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." + }, + "prefixes": { + "type": "array", + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + } + } + } + }, + "Policy": { + "id": "Policy", + "type": "object", + "description": "A bucket/object IAM policy.", + "properties": { + "bindings": { + "type": "array", + "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", + "items": { + "type": "object", + "properties": { + "condition": { + "type": "any" + }, + "members": { + "type": "array", + "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. 
For example, projectViewer:my-example-project", + "items": { + "type": "string" + }, + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + } + }, + "role": { + "type": "string", + "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + } + } + } + }, + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + } + }, + "etag": { + "type": "string", + "description": "HTTP 1.1 Entity tag for the policy.", + "format": "byte" + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.", + "default": "storage#policy" + }, + "resourceId": { + "type": "string", + "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input." + } + } + }, + "RewriteResponse": { + "id": "RewriteResponse", + "type": "object", + "description": "A rewrite response.", + "properties": { + "done": { + "type": "boolean", + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." + }, + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#rewriteResponse" + }, + "objectSize": { + "type": "string", + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "int64" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." 
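A `Policy` is edited read-modify-write: fetch it, append a binding, and write it back, letting the returned `etag` guard against concurrent edits. A hedged sketch granting `roles/storage.objectViewer` to `allUsers`, assuming `svc` and a placeholder bucket:

```go
// makePublic appends an objectViewer binding for allUsers to the bucket policy.
func makePublic(svc *storage.Service) error {
	policy, err := svc.Buckets.GetIamPolicy("my-bucket").Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{"allUsers"},
	})
	// The etag carried in policy makes this a compare-and-swap on the server.
	_, err = svc.Buckets.SetIamPolicy("my-bucket", policy).Do()
	return err
}
```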
+ }, + "rewriteToken": { + "type": "string", + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." + }, + "totalBytesRewritten": { + "type": "string", + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", + "format": "int64" + } + } + }, + "ServiceAccount": { + "id": "ServiceAccount", + "type": "object", + "description": "A subscription to receive Google PubSub notifications.", + "properties": { + "email_address": { + "type": "string", + "description": "The ID of the notification." + }, + "kind": { + "type": "string", + "description": "The kind of item this is. For notifications, this is always storage#notification.", + "default": "storage#serviceAccount" + } + } + }, + "TestIamPermissionsResponse": { + "id": "TestIamPermissionsResponse", + "type": "object", + "description": "A storage.(buckets|objects).testIamPermissions response.", + "properties": { + "kind": { + "type": "string", + "description": "The kind of item this is.", + "default": "storage#testIamPermissionsResponse" + }, + "permissions": { + "type": "array", + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.", + "items": { + "type": "string" + } + } + } + } + }, + "resources": { + "bucketAccessControls": { + "methods": { + "delete": { + "id": "storage.bucketAccessControls.delete", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.bucketAccessControls.get", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.bucketAccessControls.insert", + "path": "b/{bucket}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.bucketAccessControls.list", + "path": "b/{bucket}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.bucketAccessControls.patch", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.bucketAccessControls.update", + "path": "b/{bucket}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "id": "storage.buckets.delete", + "path": "b/{bucket}", + "httpMethod": "DELETE", + "description": "Permanently deletes an empty bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.buckets.get", + "path": "b/{bucket}", + "httpMethod": "GET", + "description": "Returns metadata for the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." 
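The `bucketAccessControls` methods above are plain CRUD over ACL entries keyed by entity. A sketch of insert-then-list, assuming `svc` and a placeholder bucket:

```go
// grantReader adds a READER entry for all authenticated users, then
// returns the bucket's full ACL.
func grantReader(svc *storage.Service) ([]*storage.BucketAccessControl, error) {
	acl := &storage.BucketAccessControl{
		Entity: "allAuthenticatedUsers", // any entity form from the schema works
		Role:   "READER",
	}
	if _, err := svc.BucketAccessControls.Insert("my-bucket", acl).Do(); err != nil {
		return nil, err
	}
	acls, err := svc.BucketAccessControls.List("my-bucket").Do()
	if err != nil {
		return nil, err
	}
	return acls.Items, nil
}
```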
+ ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "id": "storage.buckets.getIamPolicy", + "path": "b/{bucket}/iam", + "httpMethod": "GET", + "description": "Returns an IAM policy for the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "id": "storage.buckets.insert", + "path": "b", + "httpMethod": "POST", + "description": "Creates a new bucket.", + "parameters": { + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." 
+ ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request.", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "storage.buckets.list", + "path": "b", + "httpMethod": "GET", + "description": "Retrieves a list of buckets for a given project.", + "parameters": { + "maxResults": { + "type": "integer", + "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + "default": "1000", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query" + }, + "project": { + "type": "string", + "description": "A valid API project identifier.", + "required": true, + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request.", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "id": "storage.buckets.patch", + "path": "b/{bucket}", + "httpMethod": "PATCH", + "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. 
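`buckets.insert` takes the project as a required query parameter, and `buckets.list` pages with `pageToken`/`nextPageToken`. A sketch, assuming `svc` and placeholder identifiers:

```go
// createAndList creates a bucket, then collects every bucket name in the project.
func createAndList(svc *storage.Service) ([]string, error) {
	_, err := svc.Buckets.Insert("my-project", &storage.Bucket{Name: "my-new-bucket"}).Do()
	if err != nil {
		return nil, err
	}
	var names []string
	pageToken := ""
	for {
		resp, err := svc.Buckets.List("my-project").PageToken(pageToken).Do()
		if err != nil {
			return nil, err
		}
		for _, b := range resp.Items {
			names = append(names, b.Name)
		}
		if resp.NextPageToken == "" {
			return names, nil
		}
		pageToken = resp.NextPageToken
	}
}
```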
This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "setIamPolicy": { + "id": "storage.buckets.setIamPolicy", + "path": "b/{bucket}/iam", + "httpMethod": "PUT", + "description": "Updates an IAM policy for the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "testIamPermissions": { + "id": "storage.buckets.testIamPermissions", + "path": "b/{bucket}/iam/testPermissions", + "httpMethod": "GET", + "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "permissions": { + "type": "string", + "description": "Permissions to test.", + "required": true, + "repeated": true, + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "permissions" + ], + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.buckets.update", + "path": "b/{bucket}", + "httpMethod": "PUT", + "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
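`testIamPermissions` takes a repeated `permissions` query parameter and echoes back the subset the caller actually holds. A sketch, assuming `svc` and a placeholder bucket:

```go
// heldPermissions reports which of the tested permissions the caller has.
func heldPermissions(svc *storage.Service) ([]string, error) {
	resp, err := svc.Buckets.TestIamPermissions("my-bucket",
		[]string{"storage.buckets.get", "storage.objects.list"}).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}
```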
+ ], + "location": "query" + }, + "predefinedDefaultObjectAcl": { + "type": "string", + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "id": "storage.channels.stop", + "path": "channels/stop", + "httpMethod": "POST", + "description": "Stop watching resources through this channel", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "id": "storage.defaultObjectAccessControls.delete", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
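`channels.stop` is the only channel method; it needs the `id` and `resourceId` that were returned when the watch channel was opened. A sketch, assuming `svc`:

```go
// stopChannel tears down a notification channel. id and resourceID must be
// the values returned by the watch call that created the channel.
func stopChannel(svc *storage.Service, id, resourceID string) error {
	return svc.Channels.Stop(&storage.Channel{
		Id:         id,
		ResourceId: resourceID,
	}).Do()
}
```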
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.defaultObjectAccessControls.get", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "GET", + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.defaultObjectAccessControls.insert", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "POST", + "description": "Creates a new default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.defaultObjectAccessControls.list", + "path": "b/{bucket}/defaultObjectAcl", + "httpMethod": "GET", + "description": "Retrieves default object ACL entries on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.defaultObjectAccessControls.patch", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PATCH", + "description": "Updates a default object ACL entry on the specified bucket. 
This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.defaultObjectAccessControls.update", + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "httpMethod": "PUT", + "description": "Updates a default object ACL entry on the specified bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "notifications": { + "methods": { + "delete": { + "id": "storage.notifications.delete", + "path": "b/{bucket}/notificationConfigs/{notification}", + "httpMethod": "DELETE", + "description": "Permanently deletes a notification subscription.", + "parameters": { + "bucket": { + "type": "string", + "description": "The parent bucket of the notification.", + "required": true, + "location": "path" + }, + "notification": { + "type": "string", + "description": "ID of the notification to delete.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "notification" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.notifications.get", + "path": "b/{bucket}/notificationConfigs/{notification}", + "httpMethod": "GET", + "description": "View a notification configuration.", + "parameters": { + "bucket": { + "type": "string", + "description": "The parent bucket of the notification.", + "required": true, + "location": "path" + }, + "notification": { + "type": "string", + "description": "Notification ID", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "notification" + ], + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "id": "storage.notifications.insert", + "path": "b/{bucket}/notificationConfigs", + "httpMethod": "POST", + "description": "Creates a notification subscription for a given bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "The parent bucket of the notification.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Notification" + }, + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "storage.notifications.list", + "path": "b/{bucket}/notificationConfigs", + "httpMethod": "GET", + "description": "Retrieves a list of notification subscriptions for a given bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a Google Cloud Storage bucket.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Notifications" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "id": "storage.objectAccessControls.delete", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "DELETE", + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
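A notification subscription ties a bucket to a Cloud Pub/Sub topic in the `//pubsub.googleapis.com/...` form shown in the `Notification` schema. A sketch of `notifications.insert`, assuming `svc`, placeholder names, and a topic that already grants the storage service account publish rights:

```go
// watchBucket subscribes the bucket to a Pub/Sub topic for object events.
func watchBucket(svc *storage.Service) error {
	n := &storage.Notification{
		Topic:         "//pubsub.googleapis.com/projects/my-project/topics/my-topic",
		PayloadFormat: "JSON_API_V1",
		EventTypes:    []string{"OBJECT_FINALIZE"}, // optional event filter
	}
	_, err := svc.Notifications.Insert("my-bucket", n).Do()
	return err
}
```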
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "id": "storage.objectAccessControls.get", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "GET", + "description": "Returns the ACL entry for the specified entity on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "id": "storage.objectAccessControls.insert", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "POST", + "description": "Creates a new ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "id": "storage.objectAccessControls.list", + "path": "b/{bucket}/o/{object}/acl", + "httpMethod": "GET", + "description": "Retrieves ACL entries on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "id": "storage.objectAccessControls.patch", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PATCH", + "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "id": "storage.objectAccessControls.update", + "path": "b/{bucket}/o/{object}/acl/{entity}", + "httpMethod": "PUT", + "description": "Updates an ACL entry on the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of a bucket.", + "required": true, + "location": "path" + }, + "entity": { + "type": "string", + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
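As elsewhere in this API, `patch` sends only the fields you set on the request body, while `update` (PUT) replaces the whole ACL entry. A sketch of granting READER on one object via patch semantics, with placeholder bucket/object names and the user-emailAddress entity form described above:

```go
package main

import storage "google.golang.org/api/storage/v1"

// grantObjectReader is a sketch: PATCH changes only the Role field of the
// existing entry for the (placeholder) entity.
func grantObjectReader(svc *storage.Service) (*storage.ObjectAccessControl, error) {
	return svc.ObjectAccessControls.Patch(
		"my-bucket", "path/to/object", "user-jane@example.com",
		&storage.ObjectAccessControl{Role: "READER"},
	).Do()
}
```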
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "compose": { + "id": "storage.objects.compose", + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "httpMethod": "POST", + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "kmsKeyName": { + "type": "string", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "copy": { + "id": "storage.objects.copy", + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Copies a source object to a destination object. 
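`objects.compose` stitches already-uploaded source objects into one destination object in the same bucket (GCS accepts up to 32 source components per call). A sketch with placeholder names:

```go
package main

import storage "google.golang.org/api/storage/v1"

// composeParts is a sketch: it concatenates two previously uploaded parts
// into a single object, setting only the destination's content type.
func composeParts(svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "parts/part-000"},
			{Name: "parts/part-001"},
		},
	}
	// POST b/{destinationBucket}/o/{destinationObject}/compose
	return svc.Objects.Compose("my-bucket", "assembled.txt", req).Do()
}
```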
Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "delete": { + "id": "storage.objects.delete", + "path": "b/{bucket}/o/{object}", + "httpMethod": "DELETE", + "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "id": "storage.objects.get", + "path": "b/{bucket}/o/{object}", + "httpMethod": "GET", + "description": "Retrieves an object or its metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
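The generation preconditions on `objects.delete` turn a blind delete into a compare-and-delete: passing the generation you last read makes the request fail rather than race a concurrent overwrite. A sketch with placeholder names:

```go
package main

import storage "google.golang.org/api/storage/v1"

// deleteIfUnchanged is a sketch: the delete succeeds only while the object's
// current generation still equals gen (e.g. taken from a prior objects.get).
func deleteIfUnchanged(svc *storage.Service, gen int64) error {
	return svc.Objects.Delete("my-bucket", "path/to/object").
		IfGenerationMatch(gen).
		Do()
}
```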
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "getIamPolicy": { + "id": "storage.objects.getIamPolicy", + "path": "b/{bucket}/o/{object}/iam", + "httpMethod": "GET", + "description": "Returns an IAM policy for the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "id": "storage.objects.insert", + "path": "b/{bucket}/o", + "httpMethod": "POST", + "description": "Stores a new object and metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. 
Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "contentEncoding": { + "type": "string", + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "kmsKeyName": { + "type": "string", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query" + }, + "name": { + "type": "string", + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "query" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true, + "supportsMediaUpload": true, + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + }, + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + } + } + } + }, + "list": { + "id": "storage.objects.list", + "path": "b/{bucket}/o", + "httpMethod": "GET", + "description": "Retrieves a list of objects matching the criteria.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "default": "1000", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "id": "storage.objects.patch", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PATCH", + "description": "Updates an object's metadata. 
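`objects.insert` is the only method here with a `mediaUpload` block: object bytes travel over the simple multipart path or the resumable path, and the generated client chooses between them based on the media options. A sketch of a small upload with placeholder names:

```go
package main

import (
	"io"

	storage "google.golang.org/api/storage/v1"
)

// uploadObject is a sketch: Media attaches the object bytes, and the library
// picks a multipart or resumable upload under the hood.
func uploadObject(svc *storage.Service, r io.Reader) (*storage.Object, error) {
	return svc.Objects.Insert("my-bucket", &storage.Object{
		Name:        "notes/hello.txt",
		ContentType: "text/plain",
	}).Media(r).Do()
}
```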
This method supports patch semantics.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "rewrite": { + "id": "storage.objects.rewrite", + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "httpMethod": "POST", + "description": "Rewrites a source object to a destination object. 
Optionally overrides metadata.", + "parameters": { + "destinationBucket": { + "type": "string", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "required": true, + "location": "path" + }, + "destinationKmsKeyName": { + "type": "string", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query" + }, + "destinationObject": { + "type": "string", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "destinationPredefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifSourceMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "maxBytesRewrittenPerCall": { + "type": "string", + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "rewriteToken": { + "type": "string", + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query" + }, + "sourceBucket": { + "type": "string", + "description": "Name of the bucket in which to find the source object.", + "required": true, + "location": "path" + }, + "sourceGeneration": { + "type": "string", + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "sourceObject": { + "type": "string", + "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "id": "storage.objects.setIamPolicy", + "path": "b/{bucket}/o/{object}/iam", + "httpMethod": "PUT", + "description": "Updates an IAM policy for the specified object.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "testIamPermissions": { + "id": "storage.objects.testIamPermissions", + "path": "b/{bucket}/o/{object}/iam/testPermissions", + "httpMethod": "GET", + "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "permissions": { + "type": "string", + "description": "Permissions to test.", + "required": true, + "repeated": true, + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
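Unlike `copy`, a single `rewrite` call may return before all bytes have moved (notably when source and destination differ in location or storage class), so the caller loops, feeding the returned `rewriteToken` back in until `done` is true. A sketch with placeholder names:

```go
package main

import storage "google.golang.org/api/storage/v1"

// rewriteObject is a sketch of the rewriteToken loop described above.
func rewriteObject(svc *storage.Service) (*storage.Object, error) {
	var token string
	for {
		call := svc.Objects.Rewrite(
			"src-bucket", "big.bin", "dst-bucket", "big.bin",
			&storage.Object{}, // empty destination metadata keeps defaults
		)
		if token != "" {
			call = call.RewriteToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp.Resource, nil
		}
		token = resp.RewriteToken
	}
}
```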
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object", + "permissions" + ], + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "id": "storage.objects.update", + "path": "b/{bucket}/o/{object}", + "httpMethod": "PUT", + "description": "Updates an object's metadata.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which the object resides.", + "required": true, + "location": "path" + }, + "generation": { + "type": "string", + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query" + }, + "ifGenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query" + }, + "ifGenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query" + }, + "ifMetagenerationNotMatch": { + "type": "string", + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query" + }, + "object": { + "type": "string", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "required": true, + "location": "path" + }, + "predefinedAcl": { + "type": "string", + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. 
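`testIamPermissions` reports which of the named permissions the caller actually holds on the object, which makes it useful as a preflight check before attempting a read or write. A sketch, using the standard `storage.objects.get` permission and placeholder names:

```go
package main

import storage "google.golang.org/api/storage/v1"

// canReadObject is a sketch: it returns true if the caller holds
// storage.objects.get on the placeholder object.
func canReadObject(svc *storage.Service) (bool, error) {
	resp, err := svc.Objects.TestIamPermissions(
		"my-bucket", "path/to/object",
		[]string{"storage.objects.get"},
	).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) > 0, nil
}
```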
Required for Requester Pays buckets.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket", + "object" + ], + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "watchAll": { + "id": "storage.objects.watchAll", + "path": "b/{bucket}/o/watch", + "httpMethod": "POST", + "description": "Watch for changes on all objects in a bucket.", + "parameters": { + "bucket": { + "type": "string", + "description": "Name of the bucket in which to look for objects.", + "required": true, + "location": "path" + }, + "delimiter": { + "type": "string", + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "default": "1000", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query" + }, + "prefix": { + "type": "string", + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query" + }, + "projection": { + "type": "string", + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query" + }, + "versions": { + "type": "boolean", + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query" + } + }, + "parameterOrder": [ + "bucket" + ], + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + }, + "projects": { + "resources": { + "serviceAccount": { + "methods": { + "get": { + "id": "storage.projects.serviceAccount.get", + "path": "projects/{projectId}/serviceAccount", + "httpMethod": "GET", + "description": "Get the email address of this project's Google Cloud Storage service account.", + "parameters": { + "projectId": { + "type": "string", + "description": "Project ID", + "required": true, + "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request.", + "location": "query" + } + }, + "parameterOrder": [ + "projectId" + ], + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + } + } + } + } +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 00000000000..62a627594c2 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,10976 @@ +// Package storage provides access to the Cloud Storage JSON API. +// +// See https://developers.google.com/storage/docs/json_api/ +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// storageService, err := storage.New(oauthHttpClient) +package storage // import "google.golang.org/api/storage/v1" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://www.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
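Here the vendored discovery document ends and the generated Go client begins. Constructing the `Service` only requires an OAuth2-authorized `*http.Client`; a sketch using Application Default Credentials and one of the scope constants declared just below:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

// newStorageService is a sketch: google.DefaultClient resolves Application
// Default Credentials (env var, gcloud config, or metadata server).
func newStorageService() *storage.Service {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, storage.DevstorageReadWriteScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}
	return svc
}
```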
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.Notifications = NewNotificationsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + BucketAccessControls *BucketAccessControlsService + + Buckets *BucketsService + + Channels *ChannelsService + + DefaultObjectAccessControls *DefaultObjectAccessControlsService + + Notifications *NotificationsService + + ObjectAccessControls *ObjectAccessControlsService + + Objects *ObjectsService + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { + rs := &BucketAccessControlsService{s: s} + return rs +} + +type BucketAccessControlsService struct { + s *Service +} + +func NewBucketsService(s *Service) *BucketsService { + rs := &BucketsService{s: s} + return rs +} + +type BucketsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { + rs := &DefaultObjectAccessControlsService{s: s} + return rs +} + +type DefaultObjectAccessControlsService struct { + s *Service +} + +func NewNotificationsService(s *Service) *NotificationsService { + rs := &NotificationsService{s: s} + return rs +} + +type NotificationsService struct { + s *Service +} + +func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { + rs := &ObjectAccessControlsService{s: s} + return rs +} + +type ObjectAccessControlsService struct { + s *Service +} + +func NewObjectsService(s *Service) *ObjectsService { + rs := &ObjectsService{s: s} + return rs +} + +type ObjectsService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.ServiceAccount = NewProjectsServiceAccountService(s) + return rs +} + +type ProjectsService struct { + s *Service + + ServiceAccount *ProjectsServiceAccountService +} + +func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { + rs := 
&ProjectsServiceAccountService{s: s} + return rs +} + +type ProjectsServiceAccountService struct { + s *Service +} + +// Bucket: A bucket. +type Bucket struct { + // Acl: Access controls on the bucket. + Acl []*BucketAccessControl `json:"acl,omitempty"` + + // Billing: The bucket's billing configuration. + Billing *BucketBilling `json:"billing,omitempty"` + + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) + // configuration. + Cors []*BucketCors `json:"cors,omitempty"` + + // DefaultObjectAcl: Default access controls to apply to new objects + // when no ACL is provided. + DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + + // Encryption: Encryption configuration used by default for newly + // inserted objects, when no encryption config is specified. + Encryption *BucketEncryption `json:"encryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the bucket. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the bucket. For buckets, the id and name properties + // are the same. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For buckets, this is always + // storage#bucket. + Kind string `json:"kind,omitempty"` + + // Labels: User-provided labels, in key/value pairs. + Labels map[string]string `json:"labels,omitempty"` + + // Lifecycle: The bucket's lifecycle configuration. See lifecycle + // management for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + + // Location: The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to + // US. See the developer's guide for the authoritative list. + Location string `json:"location,omitempty"` + + // Logging: The bucket's logging configuration, which defines the + // destination bucket and optional name prefix for the current bucket's + // logs. + Logging *BucketLogging `json:"logging,omitempty"` + + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + + // Owner: The owner of the bucket. This is always the project team's + // owner group. + Owner *BucketOwner `json:"owner,omitempty"` + + // ProjectNumber: The project number of the project the bucket belongs + // to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + + // SelfLink: The URI of this bucket. + SelfLink string `json:"selfLink,omitempty"` + + // StorageClass: The bucket's default storage class, used whenever no + // storageClass is specified for a newly-created object. This defines + // how objects in the bucket are stored and determines the SLA and the + // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, + // NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value + // is not specified when the bucket is created, it will default to + // STANDARD. For more information, see storage classes. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The modification time of the bucket in RFC 3339 format. + Updated string `json:"updated,omitempty"` + + // Versioning: The bucket's versioning configuration. 
+ Versioning *BucketVersioning `json:"versioning,omitempty"` + + // Website: The bucket's website configuration, controlling how the + // service behaves when accessing bucket contents as a web site. See the + // Static Website Examples for more information. + Website *BucketWebsite `json:"website,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Acl") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Bucket) MarshalJSON() ([]byte, error) { + type noMethod Bucket + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketBilling: The bucket's billing configuration. +type BucketBilling struct { + // RequesterPays: When set to true, bucket is requester pays. + RequesterPays bool `json:"requesterPays,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequesterPays") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequesterPays") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketBilling) MarshalJSON() ([]byte, error) { + type noMethod BucketBilling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + + // Method: The list of HTTP methods on which to include CORS response + // headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list + // of methods, and means "any method". + Method []string `json:"method,omitempty"` + + // Origin: The list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin".
+ Origin []string `json:"origin,omitempty"` + + // ResponseHeader: The list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketCors) MarshalJSON() ([]byte, error) { + type noMethod BucketCors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketEncryption: Encryption configuration used by default for newly +// inserted objects, when no encryption config is specified. +type BucketEncryption struct { + DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketEncryption) MarshalJSON() ([]byte, error) { + type noMethod BucketEncryption + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle +// management for more information. +type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take + // and the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
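The CORS and encryption blocks compose into a `Bucket` like any other field. A hedged sketch (the bucket name and KMS key resource name are placeholders) that marshals the result through the `MarshalJSON` helpers defined above:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	bucket := &storage.Bucket{
		Name: "example-bucket", // placeholder
		Cors: []*storage.BucketCors{{
			MaxAgeSeconds:  3600,
			Method:         []string{"GET", "HEAD"},
			Origin:         []string{"https://example.com"},
			ResponseHeader: []string{"Content-Type"},
		}},
		// Objects inserted without an explicit key default to this Cloud
		// KMS key (placeholder resource name).
		Encryption: &storage.BucketEncryption{
			DefaultKmsKeyName: "projects/p/locations/global/keyRings/kr/cryptoKeys/k",
		},
	}

	body, err := json.MarshalIndent(bucket, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```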
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rule") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycle + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleAction: The action to take. +type BucketLifecycleRuleAction struct { + // StorageClass: Target storage class. Required iff the type of the + // action is SetStorageClass. + StorageClass string `json:"storageClass,omitempty"` + + // Type: Type of the action. Currently, only Delete and SetStorageClass + // are supported. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "StorageClass") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StorageClass") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
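The ForceSendFields contract repeated on every type here is easiest to see on a small struct. A sketch using `BucketBilling` from above; the commented output strings are what the `encoding/json` round-trip should produce under that contract:

```go
package main

import (
	"encoding/json"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	// An unset bool is "empty", so omitempty drops it entirely.
	b := &storage.BucketBilling{RequesterPays: false}
	out, _ := json.Marshal(b)
	fmt.Println(string(out)) // {}

	// Forcing the field emits an explicit false, which is what a Patch
	// request needs in order to clear a previously-true setting.
	b.ForceSendFields = []string{"RequesterPays"}
	out, _ = json.Marshal(b)
	fmt.Println(string(out)) // {"requesterPays":false}
}
```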
+ NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRuleAction + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action +// will be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + Age int64 `json:"age,omitempty"` + + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object + // is created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + + // IsLive: Relevant only for versioned objects. If the value is true, + // this condition matches live objects; if the value is false, it + // matches archived objects. + IsLive *bool `json:"isLive,omitempty"` + + // MatchesStorageClass: Objects having any of the storage classes + // specified by this condition will be matched. Values include + // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and + // DURABLE_REDUCED_AVAILABILITY. + MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + + // NumNewerVersions: Relevant only for versioned objects. If the value + // is N, this condition is satisfied when there are at least N versions + // (including the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Age") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Age") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { + type noMethod BucketLifecycleRuleCondition + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs + // should be placed. + LogBucket string `json:"logBucket,omitempty"` + + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogBucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LogBucket") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLogging) MarshalJSON() ([]byte, error) { + type noMethod BucketLogging + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketOwner: The owner of the bucket. This is always the project +// team's owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketOwner) MarshalJSON() ([]byte, error) { + type noMethod BucketOwner + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
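A lifecycle configuration built from the rule, action, and condition types above; the ages and target storage class are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	live := false // IsLive is *bool so false is distinguishable from unset

	lifecycle := &storage.BucketLifecycle{
		Rule: []*storage.BucketLifecycleRule{
			{
				// Demote objects to NEARLINE once they are 30 days old.
				Action: &storage.BucketLifecycleRuleAction{
					Type:         "SetStorageClass",
					StorageClass: "NEARLINE",
				},
				Condition: &storage.BucketLifecycleRuleCondition{Age: 30},
			},
			{
				// Delete archived (non-live) versions after a year.
				Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
				Condition: &storage.BucketLifecycleRuleCondition{Age: 365, IsLive: &live},
			},
		},
	}

	out, _ := json.MarshalIndent(lifecycle, "", "  ")
	fmt.Println(string(out))
}
```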
+ NullFields []string `json:"-"` +} + +func (s *BucketVersioning) MarshalJSON() ([]byte, error) { + type noMethod BucketVersioning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketWebsite: The bucket's website configuration, controlling how +// the service behaves when accessing bucket contents as a web site. See +// the Static Website Examples for more information. +type BucketWebsite struct { + // MainPageSuffix: If the requested object path is missing, the service + // will ensure the path has a trailing '/', append this suffix, and + // attempt to retrieve the resulting object. This allows the creation of + // index.html objects to represent directory pages. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + + // NotFoundPage: If the requested object path is missing, and any + // mainPageSuffix object is missing, if applicable, the service will + // return the named object from this bucket as the content for a 404 Not + // Found result. + NotFoundPage string `json:"notFoundPage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MainPageSuffix") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketWebsite) MarshalJSON() ([]byte, error) { + type noMethod BucketWebsite + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. 
For bucket access control entries, + // this is always storage#bucketAccessControl. + Kind string `json:"kind,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControl + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControlProjectTeam: The project team associated with the +// entity, if any. +type BucketAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControlProjectTeam + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControls: An access-control list. +type BucketAccessControls struct { + // Items: The list of items. + Items []*BucketAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of bucket access control + // entries, this is always storage#bucketAccessControls. 
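The entity grammar documented above maps directly onto `BucketAccessControl` values. A short sketch with illustrative entities:

```go
package main

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	// One entry per documented entity form (values are illustrative).
	acls := []*storage.BucketAccessControl{
		{Entity: "user-liz@example.com", Role: "OWNER"},
		{Entity: "group-example@googlegroups.com", Role: "WRITER"},
		{Entity: "domain-example.com", Role: "READER"},
		{Entity: "allUsers", Role: "READER"}, // anyone on the internet
	}
	for _, a := range acls {
		fmt.Printf("%-35s -> %s\n", a.Entity, a.Role)
	}
}
```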
+ Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { + type noMethod BucketAccessControls + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Buckets: A list of buckets. +type Buckets struct { + // Items: The list of items. + Items []*Bucket `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of buckets, this is always + // storage#buckets. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Buckets) MarshalJSON() ([]byte, error) { + type noMethod Buckets + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Channel: A notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this + // channel. + Address string `json:"address,omitempty"` + + // Expiration: Date and time of notification channel expiration, + // expressed as a Unix timestamp, in milliseconds. Optional. + Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel.
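`NextPageToken` drives a standard paging loop. A sketch assuming the generated `BucketsService.List` call (defined later in this file, with `PageToken` and `Do` methods); the `storageutil` package name is hypothetical:

```go
// Package storageutil is a hypothetical helper package for these sketches.
package storageutil

import storage "google.golang.org/api/storage/v1"

// ListAllBuckets pages through every bucket in a project by feeding each
// response's NextPageToken back into the next request; an empty token
// marks the final page.
func ListAllBuckets(srv *storage.Service, projectID string) ([]*storage.Bucket, error) {
	var all []*storage.Bucket
	token := ""
	for {
		call := srv.Buckets.List(projectID)
		if token != "" {
			call = call.PageToken(token)
		}
		res, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, res.Items...)
		if res.NextPageToken == "" {
			return all, nil
		}
		token = res.NextPageToken
	}
}
```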
+ Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource. Value: the fixed string "api#channel". + Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type noMethod Channel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ComposeRequest: A Compose request. +type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // SourceObjects: The list of source objects that will be concatenated + // into a single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Destination") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Destination") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequest) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + + // Name: The source object's name. The source object's bucket is + // implicitly the destination bucket. + Name string `json:"name,omitempty"` + + // ObjectPreconditions: Conditions that must be met for this operation + // to execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Generation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Generation") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequestSourceObjects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must +// be met for this operation to execute. +type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of + // the source object that would be used matches this value. If this + // value and a generation are both specified, they must be the same + // value or the call will fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IfGenerationMatch") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
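Putting the three compose types together: a hedged sketch that concatenates two placeholder shards, pinning the first shard's generation and keeping the two generation values identical as the field comment requires. The `storageutil` package and shard names are illustrative:

```go
package storageutil

import storage "google.golang.org/api/storage/v1"

// BuildComposeRequest concatenates two placeholder shards into dst. The
// precondition pins the exact generation of the first shard so that a
// concurrent overwrite fails the compose instead of being silently
// included.
func BuildComposeRequest(dst string) *storage.ComposeRequest {
	return &storage.ComposeRequest{
		Destination: &storage.Object{Name: dst, ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{
				Name:       "shard-0000",
				Generation: 1234567890,
				ObjectPreconditions: &storage.ComposeRequestSourceObjectsObjectPreconditions{
					IfGenerationMatch: 1234567890,
				},
			},
			{Name: "shard-0001"},
		},
	}
}
```

The resulting value would then be passed to the generated `Objects.Compose` call, e.g. `srv.Objects.Compose(bucket, dst, req).Do()` (assumed to be defined later in this file).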
+ NullFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { + type noMethod ComposeRequestSourceObjectsObjectPreconditions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notification: A subscription to receive Google PubSub notifications. +type Notification struct { + // CustomAttributes: An optional list of additional attributes to attach + // to each Cloud PubSub message published for this notification + // subscription. + CustomAttributes map[string]string `json:"custom_attributes,omitempty"` + + // Etag: HTTP 1.1 Entity tag for this subscription notification. + Etag string `json:"etag,omitempty"` + + // EventTypes: If present, only send notifications about listed event + // types. If empty, send notifications for all event types. + EventTypes []string `json:"event_types,omitempty"` + + // Id: The ID of the notification. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For notifications, this is always + // storage#notification. + Kind string `json:"kind,omitempty"` + + // ObjectNamePrefix: If present, only apply this notification + // configuration to object names that begin with this prefix. + ObjectNamePrefix string `json:"object_name_prefix,omitempty"` + + // PayloadFormat: The desired content of the Payload. + PayloadFormat string `json:"payload_format,omitempty"` + + // SelfLink: The canonical URL of this notification. + SelfLink string `json:"selfLink,omitempty"` + + // Topic: The Cloud PubSub topic to which this subscription publishes. + // Formatted as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topi + // c}' + Topic string `json:"topic,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CustomAttributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomAttributes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Notification) MarshalJSON() ([]byte, error) { + type noMethod Notification + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notifications: A list of notification subscriptions. +type Notifications struct { + // Items: The list of items. + Items []*Notification `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of notifications, this is + // always storage#notifications. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g.
"Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Notifications) MarshalJSON() ([]byte, error) { + type noMethod Notifications + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. + Acl []*ObjectAccessControl `json:"acl,omitempty"` + + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + + // CacheControl: Cache-Control directive for the object data. If + // omitted, and the object is accessible to all anonymous users, the + // default will be public, max-age=3600. + CacheControl string `json:"cacheControl,omitempty"` + + // ComponentCount: Number of underlying components that make up this + // object. Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + + // ContentType: Content-Type of the object data. If an object is stored + // without a Content-Type, it is served as application/octet-stream. + ContentType string `json:"contentType,omitempty"` + + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; + // encoded using base64 in big-endian byte order. For more information + // about using the CRC32c checksum, see Hashes and ETags: Best + // Practices. + Crc32c string `json:"crc32c,omitempty"` + + // CustomerEncryption: Metadata of customer-supplied encryption key, if + // the object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the object, including the bucket name, object name, and + // generation number. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For objects, this is always + // storage#object. + Kind string `json:"kind,omitempty"` + + // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object + // is encrypted by such a key. + KmsKeyName string `json:"kmsKeyName,omitempty"` + + // Md5Hash: MD5 hash of the data; encoded using base64. 
For more + // information about using the MD5 hash, see Hashes and ETags: Best + // Practices. + Md5Hash string `json:"md5Hash,omitempty"` + + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in + // metadata. A metageneration number is only meaningful in the context + // of a particular generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the object. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + + // Owner: The owner of the object. This will always be the uploader of + // the object. + Owner *ObjectOwner `json:"owner,omitempty"` + + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the object in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // TimeDeleted: The deletion time of the object in RFC 3339 format. Will + // be returned if and only if this version of the object has been + // deleted. + TimeDeleted string `json:"timeDeleted,omitempty"` + + // TimeStorageClassUpdated: The time at which the object's storage class + // was last changed. When the object is initially created, it will be + // set to timeCreated. + TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` + + // Updated: The modification time of the object metadata in RFC 3339 + // format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Acl") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Object) MarshalJSON() ([]byte, error) { + type noMethod Object + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption +// key, if the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // KeySha256: SHA256 hash value of the encryption key. 
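Only a handful of `Object` fields are writable by callers; the rest (`Generation`, `Size`, `Etag`, and the timestamps) are maintained by the server and only appear on responses. A sketch of a client-side object description with placeholder names and metadata:

```go
package main

import (
	"encoding/json"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	obj := &storage.Object{
		Name:         "reports/2017/q4.csv", // placeholder
		ContentType:  "text/csv",
		CacheControl: "private, max-age=0",
		Metadata: map[string]string{
			"source":   "billing-export",
			"reviewed": "false",
		},
	}

	out, _ := json.MarshalIndent(obj, "", "  ")
	fmt.Println(string(out))
}
```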
+ KeySha256 string `json:"keySha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type noMethod ObjectCustomerEncryption + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectOwner: The owner of the object. This will always be the +// uploader of the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectOwner) MarshalJSON() ([]byte, error) { + type noMethod ObjectOwner + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. 
+ Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of the object, if applied to an + // object. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For object access control entries, + // this is always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + + // Object: The name of the object, if applied to an object. + Object string `json:"object,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControl + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControlProjectTeam: The project team associated with the +// entity, if any. +type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControlProjectTeam + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. + Items []*ObjectAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of object access control + // entries, this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { + type noMethod ObjectAccessControls + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Prefixes: The list of prefixes of objects matching-but-not-listed up + // to and including the requested delimiter. + Prefixes []string `json:"prefixes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
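`Prefixes` is what makes delimiter-based listing behave like a directory walk. A sketch assuming the generated `ObjectsService.List` call with `Prefix`, `Delimiter`, and `PageToken` methods (defined later in this file); the `storageutil` package name is hypothetical:

```go
package storageutil

import storage "google.golang.org/api/storage/v1"

// ListTopLevel returns the object names directly under prefix plus the
// "subdirectory" prefixes rolled up by the "/" delimiter, paging until
// NextPageToken comes back empty.
func ListTopLevel(srv *storage.Service, bucket, prefix string) (names, dirs []string, err error) {
	token := ""
	for {
		call := srv.Objects.List(bucket).Prefix(prefix).Delimiter("/")
		if token != "" {
			call = call.PageToken(token)
		}
		res, err := call.Do()
		if err != nil {
			return nil, nil, err
		}
		for _, o := range res.Items {
			names = append(names, o.Name)
		}
		dirs = append(dirs, res.Prefixes...)
		if res.NextPageToken == "" {
			return names, dirs, nil
		}
		token = res.NextPageToken
	}
}
```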
+ NullFields []string `json:"-"` +} + +func (s *Objects) MarshalJSON() ([]byte, error) { + type noMethod Objects + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Policy: A bucket/object IAM policy. +type Policy struct { + // Bindings: An association between a role, which comes with a set of + // permissions, and members who may assume that role. + Bindings []*PolicyBindings `json:"bindings,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the policy. + Etag string `json:"etag,omitempty"` + + // Kind: The kind of item this is. For policies, this is always + // storage#policy. This field is ignored on input. + Kind string `json:"kind,omitempty"` + + // ResourceId: The ID of the resource to which this policy belongs. Will + // be of the form projects/_/buckets/bucket for buckets, and + // projects/_/buckets/bucket/objects/object for objects. A specific + // generation may be specified by appending #generationNumber to the end + // of the object name, e.g. + // projects/_/buckets/my-bucket/objects/data.txt#17. The current + // generation can be denoted with #0. This field is ignored on input. + ResourceId string `json:"resourceId,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type noMethod Policy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PolicyBindings struct { + Condition interface{} `json:"condition,omitempty"` + + // Members: A collection of identifiers for members who may assume the + // provided role. Recognized identifiers are as follows: + // - allUsers — A special identifier that represents anyone on the + // internet; with or without a Google account. + // - allAuthenticatedUsers — A special identifier that represents + // anyone who is authenticated with a Google account or a service + // account. + // - user:emailid — An email address that represents a specific + // account. For example, user:alice@gmail.com or user:joe@example.com. + // + // - serviceAccount:emailid — An email address that represents a + // service account. For example, + // serviceAccount:my-other-app@appspot.gserviceaccount.com . + // - group:emailid — An email address that represents a Google group. + // For example, group:admins@example.com. + // - domain:domain — A Google Apps domain name that represents all the + // users of that domain. For example, domain:google.com or + // domain:example.com. + // - projectOwner:projectid — Owners of the given project. 
For + // example, projectOwner:my-example-project + // - projectEditor:projectid — Editors of the given project. For + // example, projectEditor:my-example-project + // - projectViewer:projectid — Viewers of the given project. For + // example, projectViewer:my-example-project + Members []string `json:"members,omitempty"` + + // Role: The role to which members belong. Two types of roles are + // supported: new IAM roles, which grant permissions that do not map + // directly to those provided by ACLs, and legacy IAM roles, which do + // map directly to ACL permissions. All roles are of the format + // roles/storage.specificRole. + // The new IAM roles are: + // - roles/storage.admin — Full control of Google Cloud Storage + // resources. + // - roles/storage.objectViewer — Read-Only access to Google Cloud + // Storage objects. + // - roles/storage.objectCreator — Access to create objects in Google + // Cloud Storage. + // - roles/storage.objectAdmin — Full control of Google Cloud Storage + // objects. The legacy IAM roles are: + // - roles/storage.legacyObjectReader — Read-only access to objects + // without listing. Equivalent to an ACL entry on an object with the + // READER role. + // - roles/storage.legacyObjectOwner — Read/write access to existing + // objects without listing. Equivalent to an ACL entry on an object with + // the OWNER role. + // - roles/storage.legacyBucketReader — Read access to buckets with + // object listing. Equivalent to an ACL entry on a bucket with the + // READER role. + // - roles/storage.legacyBucketWriter — Read access to buckets with + // object listing/creation/deletion. Equivalent to an ACL entry on a + // bucket with the WRITER role. + // - roles/storage.legacyBucketOwner — Read and write access to + // existing buckets with object listing/creation/deletion. Equivalent to + // an ACL entry on a bucket with the OWNER role. + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Condition") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PolicyBindings) MarshalJSON() ([]byte, error) { + type noMethod PolicyBindings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is + // in progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. 
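+	// (Editor's note: the ",string" option in the JSON tag below means
+	// the int64 value is encoded and decoded as a decimal string, since
+	// JSON numbers cannot safely represent the full 64-bit range.)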
+	ObjectSize int64 `json:"objectSize,omitempty,string"`
+
+	// Resource: A resource containing the metadata for the copied-to
+	// object. This property is present in the response only when copying
+	// completes.
+	Resource *Object `json:"resource,omitempty"`
+
+	// RewriteToken: A token to use in subsequent requests to continue
+	// copying data. This token is present in the response only when there
+	// is more data to copy.
+	RewriteToken string `json:"rewriteToken,omitempty"`
+
+	// TotalBytesRewritten: The total bytes written so far, which can be
+	// used to provide a waiting user with a progress indicator. This
+	// property is always present in the response.
+	TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Done") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Done") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
+	type noMethod RewriteResponse
+	raw := noMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// ServiceAccount: A Cloud Storage service account for a project, for
+// example the identity used to publish Pub/Sub notifications on the
+// project's behalf.
+type ServiceAccount struct {
+	// EmailAddress: The email address of the service account.
+	EmailAddress string `json:"email_address,omitempty"`
+
+	// Kind: The kind of item this is. For service accounts, this is
+	// always storage#serviceAccount.
+	Kind string `json:"kind,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "EmailAddress") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "EmailAddress") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+ NullFields []string `json:"-"` +} + +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type noMethod ServiceAccount + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsResponse: A +// storage.(buckets|objects).testIamPermissions response. +type TestIamPermissionsResponse struct { + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // Permissions: The permissions held by the caller. Permissions are + // always of the format storage.resource.capability, where resource is + // one of buckets or objects. The supported permissions are as follows: + // + // - storage.buckets.delete — Delete bucket. + // - storage.buckets.get — Read bucket metadata. + // - storage.buckets.getIamPolicy — Read bucket IAM policy. + // - storage.buckets.create — Create bucket. + // - storage.buckets.list — List buckets. + // - storage.buckets.setIamPolicy — Update bucket IAM policy. + // - storage.buckets.update — Update bucket metadata. + // - storage.objects.delete — Delete object. + // - storage.objects.get — Read object data and metadata. + // - storage.objects.getIamPolicy — Read object IAM policy. + // - storage.objects.create — Create object. + // - storage.objects.list — List objects. + // - storage.objects.setIamPolicy — Update object IAM policy. + // - storage.objects.update — Update object metadata. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type noMethod TestIamPermissionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "storage.bucketAccessControls.delete": + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
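+//
+// Illustrative sketch (editor's addition; svc is an assumed *Service
+// value and the bucket, entity, and project names are placeholders):
+//
+//	err := svc.BucketAccessControls.Delete("example-bucket", "allUsers").
+//		UserProject("example-billing-project").
+//		Do()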
+func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.delete" call. +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.bucketAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.get": + +type BucketAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified +// bucket. 
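+//
+// A minimal illustrative use (editor's addition; svc is an assumed
+// *Service value and the names are placeholders):
+//
+//	acl, err := svc.BucketAccessControls.Get("example-bucket", "allUsers").Do()
+//	if err == nil {
+//		fmt.Println(acl.Role)
+//	}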
+func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
+	c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.entity = entity
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"entity": c.entity,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.bucketAccessControls.get" call.
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
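+	// Editor's note: the generated Do methods all follow this shape:
+	// issue the request, surface a 304 from If-None-Match as a
+	// *googleapi.Error, run googleapi.CheckResponse on other non-2xx
+	// statuses, then decode the JSON body into the typed result.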
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.insert": + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
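+//
+// For example (editor's illustrative sketch; the header name is a
+// placeholder, not one the API defines):
+//
+//	call.Header().Set("X-Example-Debug", "1")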
+func (c *BucketAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.insert" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.bucketAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.list": + +type BucketAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified bucket. 
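+//
+// Illustrative sketch (editor's addition; svc is an assumed *Service
+// value and "example-bucket" is a placeholder):
+//
+//	acls, err := svc.BucketAccessControls.List("example-bucket").Do()
+//	if err == nil {
+//		for _, acl := range acls.Items {
+//			fmt.Println(acl.Entity, acl.Role)
+//		}
+//	}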
+func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
+	c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.bucketAccessControls.list" call.
+// Exactly one of *BucketAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "response": { + // "$ref": "BucketAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.patch": + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an ACL entry on the specified bucket. This method +// supports patch semantics. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BucketAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.update": + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
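+//
+// For example (editor's illustrative sketch):
+//
+//	acl, err := call.Do()
+//	if googleapi.IsNotModified(err) {
+//		// A previously fetched copy is still current.
+//	}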
+func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.delete": + +type BucketsDeleteCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes an empty bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
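+//
+// For example, a caller could limit a metadata read to a few fields
+// (editor's illustrative sketch; svc and the names are placeholders):
+//
+//	b, err := svc.Buckets.Get("example-bucket").Fields("name", "location").Do()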
+func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.delete" call. +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.get": + +type BucketsGetCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata for the specified bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. 
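+//
+// Illustrative sketch of a conditional read (editor's addition; the
+// values are placeholders):
+//
+//	b, err := svc.Buckets.Get("example-bucket").IfMetagenerationMatch(3).Do()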
+func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration does not
+// match the given value.
+func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.get" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header.
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns metadata for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.get", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.getIamPolicy": + +type BucketsGetIamPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified bucket. +func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { + c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsGetIamPolicyCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.getIamPolicy" call.
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Policy.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Policy{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := json.NewDecoder(res.Body).Decode(target); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns an IAM policy for the specified bucket.",
+	//   "httpMethod": "GET",
+	//   "id": "storage.buckets.getIamPolicy",
+	//   "parameterOrder": [
+	//     "bucket"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of a bucket.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request.
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.insert": + +type BucketsInsertCall struct { + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new bucket. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.list": + +type BucketsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of buckets for a given project. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return in a single response. The service will use this +// parameter or 1,000 items, whichever is smaller. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
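+//
+// As a rough usage sketch (an editor's illustration, not generated
+// output; svc, ctx and "my-project" are assumed placeholders), a caller
+// might combine Projection with the Pages iterator defined further down:
+//
+//   err := svc.Buckets.List("my-project").Projection("noAcl").
+//           Pages(ctx, func(page *Buckets) error {
+//                   for _, b := range page.Items {
+//                           fmt.Println(b.Name)
+//                   }
+//                   return nil
+//           })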
+func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request.
+func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall {
+ c.urlParams_.Set("userProject", userProject)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.list" call.
+// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Buckets.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
+ gensupport.SetOptions(c.urlParams_, opts...) 
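+ // Editor's note (illustrative, not generated code): when IfNoneMatch was
+ // set and the server answers 304 Not Modified, the branch below converts
+ // the response into a *googleapi.Error, so a caller (call is a
+ // placeholder) can write:
+ //
+ //   if _, err := call.Do(); googleapi.IsNotModified(err) {
+ //           // the cached copy identified by the ETag is still current
+ //   }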
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Buckets{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of buckets for a given project.", + // "httpMethod": "GET", + // "id": "storage.buckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "response": { + // "$ref": "Buckets" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.buckets.patch": + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. This method supports patch semantics. 
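+//
+// A minimal sketch of a patch call (an editor's illustration; svc and the
+// bucket name are placeholders, and real callers may also need the
+// struct's ForceSendFields to transmit zero values under patch semantics):
+//
+//   updated, err := svc.Buckets.Patch("my-bucket", &Bucket{
+//           Versioning: &BucketVersioning{Enabled: true},
+//   }).Do()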
+func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.setIamPolicy": + +type BucketsSetIamPolicyCall struct { + s *Service + bucket string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified bucket. 
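+//
+// A typical read-modify-write flow might look like this sketch (an
+// editor's illustration; svc, the bucket name, role and member are
+// placeholders):
+//
+//   policy, err := svc.Buckets.GetIamPolicy("my-bucket").Do()
+//   // handle err, then adjust the bindings and write the policy back:
+//   policy.Bindings = append(policy.Bindings, &PolicyBindings{
+//           Role:    "roles/storage.objectViewer",
+//           Members: []string{"allUsers"},
+//   })
+//   policy, err = svc.Buckets.SetIamPolicy("my-bucket", policy).Do()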
+func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { + c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.policy = policy + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Policy{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := json.NewDecoder(res.Body).Decode(target); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an IAM policy for the specified bucket.",
+ // "httpMethod": "PUT",
+ // "id": "storage.buckets.setIamPolicy",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "userProject": {
+ // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/iam",
+ // "request": {
+ // "$ref": "Policy"
+ // },
+ // "response": {
+ // "$ref": "Policy"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
+
+// method id "storage.buckets.testIamPermissions":
+
+type BucketsTestIamPermissionsCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// TestIamPermissions: Tests a set of permissions on the given bucket to
+// see which, if any, are held by the caller.
+func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall {
+ c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.urlParams_.SetMulti("permissions", append([]string{}, permissions...))
+ return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall {
+ c.urlParams_.Set("userProject", userProject)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled. 
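+//
+// For instance (an editor's sketch; svc, the bucket name, the tested
+// permission and the timeout are placeholders):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//   defer cancel()
+//   resp, err := svc.Buckets.TestIamPermissions("my-bucket",
+//           []string{"storage.buckets.get"}).Context(ctx).Do()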
+func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.buckets.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.update": + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. +func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. 
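+//
+// For example (an editor's sketch; svc, bucketResource and the chosen
+// ACL value are placeholders):
+//
+//   updated, err := svc.Buckets.Update("my-bucket", bucketResource).
+//           PredefinedDefaultObjectAcl("projectPrivate").Do()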
+func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChannelsStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
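+ // Editor's note (illustrative, not generated code): a watch channel is
+ // normally stopped by passing back the Id and ResourceId returned when
+ // the watch was created (id and resID are placeholders):
+ //
+ //   err := svc.Channels.Stop(&Channel{Id: id, ResourceId: resID}).Do()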
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.delete": + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.delete" call. +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.defaultObjectAccessControls.delete",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "userProject": {
+ // "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.get":
+
+type DefaultObjectAccessControlsGetCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Get: Returns the default object ACL entry for the specified entity on
+// the specified bucket.
+func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
+ c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall {
+ c.urlParams_.Set("userProject", userProject)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request. 
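+//
+// For example (an editor's sketch; svc, the bucket name and the header
+// name/value are placeholders):
+//
+//   call := svc.DefaultObjectAccessControls.Get("my-bucket", "allUsers")
+//   call.Header().Set("X-Example-Header", "some-value")
+//   acl, err := call.Do()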
+func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.insert": + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new default object ACL entry on the specified +// bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
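+//
+// A minimal sketch (an editor's illustration; svc, the bucket name and
+// the entity/role values are placeholders):
+//
+//   acl, err := svc.DefaultObjectAccessControls.Insert("my-bucket",
+//           &ObjectAccessControl{Entity: "allUsers", Role: "READER"}).Do()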
+func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.list": + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves default object ACL entries on the specified bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves default object ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.patch": + +type DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a default object ACL entry on the specified bucket. +// This method supports patch semantics. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
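
Editorial note, not part of the patch: a short sketch of consuming the List call's ObjectAccessControls result, whose Items field holds the individual entries. The helper name and parameters are hypothetical; svc is a *storage.Service built as in the earlier sketch.

package example

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// listDefaultObjectACLs prints every default object ACL entry on a bucket.
func listDefaultObjectACLs(svc *storage.Service, bucket string) error {
	acls, err := svc.DefaultObjectAccessControls.List(bucket).Do()
	if err != nil {
		return err
	}
	for _, acl := range acls.Items {
		fmt.Printf("%s -> %s\n", acl.Entity, acl.Role)
	}
	return nil
}
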
+func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.update": + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.notifications.delete": + +type NotificationsDeleteCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a notification subscription. +func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { + c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
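
Editorial note, not part of the patch: a hypothetical helper contrasting the two write calls above. As the generated descriptions state, Patch supports patch semantics (only the fields set on the request body are changed), while Update rewrites the entry; this sketch assumes the entity already has a default ACL entry.

package example

import (
	storage "google.golang.org/api/storage/v1"
)

// demoteDefaultEntity lowers an existing default ACL entry to READER using
// Patch, leaving the entry's other fields untouched.
func demoteDefaultEntity(svc *storage.Service, bucket, entity string) (*storage.ObjectAccessControl, error) {
	return svc.DefaultObjectAccessControls.
		Patch(bucket, entity, &storage.ObjectAccessControl{Role: "READER"}).
		Do()
}
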
+func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.delete" call. +func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a notification subscription.", + // "httpMethod": "DELETE", + // "id": "storage.notifications.delete", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "ID of the notification to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.get": + +type NotificationsGetCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: View a notification configuration. +func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { + c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
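
Editorial note, not part of the patch: the Delete call above returns no response body, so its Do method yields only an error. A minimal sketch with illustrative names:

package example

import (
	storage "google.golang.org/api/storage/v1"
)

// removeNotification permanently tears down one notification configuration
// on a bucket, identified by its notification ID.
func removeNotification(svc *storage.Service, bucket, notificationID string) error {
	return svc.Notifications.Delete(bucket, notificationID).Do()
}
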
+func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *NotificationsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket":       c.bucket,
+		"notification": c.notification,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.notifications.get" call.
+// Exactly one of *Notification or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Notification.ServerResponse.Header or (if a response was returned at
+// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
+// to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "View a notification configuration.", + // "httpMethod": "GET", + // "id": "storage.notifications.get", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "Notification ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.insert": + +type NotificationsInsertCall struct { + s *Service + bucket string + notification *Notification + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a notification subscription for a given bucket. +func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { + c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
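
Editorial note, not part of the patch: a sketch of fetching a single notification configuration with the Get call above and reading a few of the Notification fields it returns. Helper and parameter names are illustrative.

package example

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// describeNotification prints where one notification configuration publishes.
func describeNotification(svc *storage.Service, bucket, notificationID string) error {
	n, err := svc.Notifications.Get(bucket, notificationID).Do()
	if err != nil {
		return err
	}
	fmt.Printf("notification %s publishes %s payloads to %s\n", n.Id, n.PayloadFormat, n.Topic)
	return nil
}
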
+func (c *NotificationsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.insert" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a notification subscription for a given bucket.", + // "httpMethod": "POST", + // "id": "storage.notifications.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "request": { + // "$ref": "Notification" + // }, + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.list": + +type NotificationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of notification subscriptions for a given +// bucket. 
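
Editorial note, not part of the patch: a sketch of creating a notification subscription with the Insert call above. Assumptions: the Pub/Sub topic already exists, the GCS service account may publish to it, and the "//pubsub.googleapis.com/..." form is the topic format the API expects; all names are illustrative.

package example

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// watchBucket wires a bucket's change events to a Cloud Pub/Sub topic.
func watchBucket(svc *storage.Service, bucket, project, topic string) (*storage.Notification, error) {
	n := &storage.Notification{
		Topic:         fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", project, topic),
		PayloadFormat: "JSON_API_V1",
	}
	return svc.Notifications.Insert(bucket, n).Do()
}
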
+func (r *NotificationsService) List(bucket string) *NotificationsListCall {
+	c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *NotificationsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.notifications.list" call.
+// Exactly one of *Notifications or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *Notifications.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notifications{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of notification subscriptions for a given bucket.", + // "httpMethod": "GET", + // "id": "storage.notifications.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a Google Cloud Storage bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "response": { + // "$ref": "Notifications" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
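
Editorial note, not part of the patch: a sketch of walking the Notifications.Items slice returned by the List call above; the generated call exposes no page token, so a single Do returns the full set. Names are illustrative.

package example

import (
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// listNotifications prints every notification configuration on a bucket.
func listNotifications(svc *storage.Service, bucket string) error {
	ns, err := svc.Notifications.List(bucket).Do()
	if err != nil {
		return err
	}
	for _, n := range ns.Items {
		fmt.Printf("%s -> %s\n", n.Id, n.Topic)
	}
	return nil
}
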
+func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. 
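
Editorial note, not part of the patch: a sketch of the per-object Delete call above. For a Requester Pays bucket the UserProject option would be chained in before Do; names here are illustrative.

package example

import (
	storage "google.golang.org/api/storage/v1"
)

// revokePublicRead drops the allUsers entry from one object's ACL. The
// generated Delete call returns no body, so Do yields only an error.
func revokePublicRead(svc *storage.Service, bucket, object string) error {
	return svc.ObjectAccessControls.Delete(bucket, object, "allUsers").Do()
}
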
+func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
+	c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.object = object
+	c.entity = entity
+	return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {
+	c.urlParams_.Set("generation", fmt.Sprint(generation))
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+		"entity": c.entity,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objectAccessControls.get" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified object. +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
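
Editorial note, not part of the patch: a sketch of conditional fetching with the Get call above, using the IfNoneMatch option and googleapi.IsNotModified exactly as the generated comments describe. The helper name and the idea that the caller stored an earlier Etag are assumptions.

package example

import (
	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// cachedObjectACL re-fetches an object ACL entry only if it has changed since
// the last read. The bool result reports whether a fresh entry was returned.
func cachedObjectACL(svc *storage.Service, bucket, object, entity, etag string) (*storage.ObjectAccessControl, bool, error) {
	acl, err := svc.ObjectAccessControls.Get(bucket, object, entity).IfNoneMatch(etag).Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // the cached copy is still current
	}
	if err != nil {
		return nil, false, err
	}
	return acl, true, nil
}
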
+func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified object. +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
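
Editorial note, not part of the patch: a sketch of granting access with the Insert call above. The "user-" entity prefix follows the entity forms listed in the generated parameter description; the email and helper name are illustrative.

package example

import (
	storage "google.golang.org/api/storage/v1"
)

// grantReader adds a READER entry for a single user on one object.
func grantReader(svc *storage.Service, bucket, object, email string) (*storage.ObjectAccessControl, error) {
	acl := &storage.ObjectAccessControl{
		Entity: "user-" + email,
		Role:   "READER",
	}
	return svc.ObjectAccessControls.Insert(bucket, object, acl).Do()
}
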
+func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.objectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an ACL entry on the specified object. This method +// supports patch semantics. +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
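+//
+// As a hedged sketch (placeholder names; svc is an already-constructed
+// *Service), a minimal patch that only changes the role might look like:
+//
+//	updated, err := svc.ObjectAccessControls.Patch(
+//		"my-bucket", "my-object", "allUsers",
+//		&ObjectAccessControl{Role: "READER"},
+//	).Fields("role").Do()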
+func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
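+//
+// For example (a sketch with placeholder values; svc is an assumed,
+// configured *Service), a caller could attach a custom header before Do:
+//
+//	call := svc.ObjectAccessControls.Update("my-bucket", "my-object",
+//		"user-test@example.com", &ObjectAccessControl{Role: "OWNER"})
+//	call.Header().Set("X-Example-Trace", "debug") // hypothetical header
+//	updated, err := call.Do()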
+func (c *ObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
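+//
+// A hedged sketch of composing in a Requester Pays bucket (all names are
+// placeholders; the ComposeRequest fields are assumed from this package's
+// schema types):
+//
+//	obj, err := svc.Objects.Compose("my-bucket", "combined.log", &ComposeRequest{
+//		SourceObjects: []*ComposeRequestSourceObjects{{Name: "part-1.log"}, {Name: "part-2.log"}},
+//	}).UserProject("my-billing-project").Do()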
+func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsComposeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. Setting to 0 makes the +// operation succeed only if there are no live versions of the object. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. If no live object exists, the precondition fails. Setting to 0 +// makes the operation succeed only if there is a live version of the +// object. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. 
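+//
+// For instance (a sketch; the names and the metageneration value are
+// placeholders), a copy made conditional on the destination's
+// metageneration:
+//
+//	obj, err := svc.Objects.Copy("src-bucket", "src.txt", "dst-bucket",
+//		"dst.txt", &Object{}).
+//		IfMetagenerationMatch(3).
+//		Do()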
+func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. 
Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
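+//
+// A hypothetical delete against a Requester Pays bucket (placeholder
+// names; svc is an assumed *Service) bills the named project:
+//
+//	err := svc.Objects.Delete("my-bucket", "my-object").
+//		UserProject("my-billing-project").
+//		Do()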
+func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an object or its metadata. +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. 
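+//
+// Sketch (placeholder values): fetch metadata only while the
+// metageneration still matches one observed earlier:
+//
+//	obj, err := svc.Objects.Get("my-bucket", "my-object").
+//		IfMetagenerationMatch(2).
+//		Do()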
+func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit the owner, acl property.
+func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"object": c.object,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "projection": {
+	//       "description": "Set of properties to return. Defaults to noAcl.",
+	//       "enum": [
+	//         "full",
+	//         "noAcl"
+	//       ],
+	//       "enumDescriptions": [
+	//         "Include all properties.",
+	//         "Omit the owner, acl property."
+	//       ],
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/o/{object}",
+	//   "response": {
+	//     "$ref": "Object"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_only",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ],
+	//   "supportsMediaDownload": true,
+	//   "useMediaDownloadService": true
+	// }
+
+}
+
+// method id "storage.objects.getIamPolicy":
+
+type ObjectsGetIamPolicyCall struct {
+	s            *Service
+	bucket       string
+	object       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// GetIamPolicy: Returns an IAM policy for the specified object.
+func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall {
+	c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.object = object
+	return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall {
+	c.urlParams_.Set("generation", fmt.Sprint(generation))
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
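+//
+// Sketch (placeholder names; svc is an assumed *Service): headers set this
+// way ride along with the eventual Do request:
+//
+//	call := svc.Objects.GetIamPolicy("my-bucket", "my-object")
+//	call.Header().Set("X-Example-Debug", "1") // hypothetical header
+//	policy, err := call.Do()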
+func (c *ObjectsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified object.", + // "httpMethod": "GET", + // "id": "storage.objects.getIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + ctx_ context.Context + header_ http.Header +} + +// Insert: Stores a new object and metadata. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. 
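+//
+// More broadly, a minimal Insert sketch combining a generation
+// precondition with a media upload, assuming svc is an authenticated
+// *storage.Service and the names are placeholders. IfGenerationMatch(0)
+// makes the upload succeed only if no live object with that name exists
+// (a failed precondition surfaces as a googleapi.Error with code 412):
+//
+//   obj := &storage.Object{Name: "notes/hello.txt", ContentType: "text/plain"}
+//   created, err := svc.Objects.Insert("my-bucket", obj).
+//     IfGenerationMatch(0).
+//     Media(strings.NewReader("hello, world\n")).
+//     Do()
+//   if err != nil {
+//     log.Fatal(err)
+//   }
+//   fmt.Println(created.Name, created.Generation)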
+func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of
+// the Cloud KMS key, of the form
+// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
+// that will be used to encrypt the object. Overrides the object
+// metadata's kms_key_name value, if any.
+func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall {
+	c.urlParams_.Set("kmsKeyName", kmsKeyName)
+	return c
+}
+
+// Name sets the optional parameter "name": Name of the object. Required
+// when the object metadata is not otherwise provided. Overrides the
+// object metadata's name value, if any. For information about how to
+// URL encode object names to be path safe, see Encoding URI Path Parts.
+func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
+	c.urlParams_.Set("name", name)
+	return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this object.
+//
+// Possible values:
+//   "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+//   "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+//   "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+//   "private" - Object owner gets OWNER access.
+//   "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+//   "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall {
+	c.urlParams_.Set("predefinedAcl", predefinedAcl)
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl, unless the object resource
+// specifies the acl property, when it defaults to full.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit the owner, acl property.
+func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Media specifies the media to upload in one or more chunks. The chunk
+// size may be controlled by supplying a MediaOption generated by
+// googleapi.ChunkSize. The chunk size defaults to
+// googleapi.DefaultUploadChunkSize. The Content-Type header used in the
+// upload request will be determined by sniffing the contents of r,
+// unless a MediaOption generated by googleapi.ContentType is
+// supplied.
+// At most one of Media and ResumableMedia may be set.
+func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall {
+	if ct := c.object.ContentType; ct != "" {
+		options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)
+	}
+	c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)
+	return c
+}
+
+// ResumableMedia specifies the media to upload in chunks and can be
+// canceled with ctx.
+// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.mediaInfo_.SetProgressUpdater(pu) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.mediaInfo_ != nil { + urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "supportsMediaUpload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of objects matching the criteria. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. 
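+//
+// A minimal paging sketch, assuming svc is an authenticated
+// *storage.Service, ctx is a context.Context, and the bucket name and
+// prefix are placeholders. Pages (defined below) handles the pageToken
+// bookkeeping between calls:
+//
+//   call := svc.Objects.List("my-bucket").Prefix("logs/").MaxResults(500)
+//   err := call.Pages(ctx, func(page *storage.Objects) error {
+//     for _, o := range page.Items {
+//       fmt.Println(o.Name, o.Size)
+//     }
+//     return nil
+//   })
+//   if err != nil {
+//     log.Fatal(err)
+//   }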
+func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {
+	c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A
+// previously-returned page token representing part of the larger set of
+// results to view.
+func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Prefix sets the optional parameter "prefix": Filter results to
+// objects whose names begin with this prefix.
+func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
+	c.urlParams_.Set("prefix", prefix)
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit the owner, acl property.
+func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Versions sets the optional parameter "versions": If true, lists all
+// versions of an object as distinct results. The default is false. For
+// more information, see Object Versioning.
+func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
+	c.urlParams_.Set("versions", fmt.Sprint(versions))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
+	urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.list" call. +// Exactly one of *Objects or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an object's metadata. This method supports patch +// semantics. +func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. 
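+//
+// A minimal read-then-patch sketch, assuming svc is an authenticated
+// *storage.Service and the names are placeholders. Reusing the
+// metageneration read first makes the patch fail (HTTP 412) instead of
+// clobbering a concurrent metadata change:
+//
+//   cur, err := svc.Objects.Get("my-bucket", "path/to/object").Do()
+//   if err != nil {
+//     log.Fatal(err)
+//   }
+//   obj := &storage.Object{Metadata: map[string]string{"reviewed": "true"}}
+//   _, err = svc.Objects.Patch("my-bucket", "path/to/object", obj).
+//     IfMetagenerationMatch(cur.Metageneration).
+//     Do()
+//   if err != nil {
+//     log.Fatal(err)
+//   }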
+func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. 
+// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. 
+func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsRewriteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.rewrite" call. +// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.setIamPolicy": + +type ObjectsSetIamPolicyCall struct { + s *Service + bucket string + object string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified object. +func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { + c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.policy = policy + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
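+//
+// A minimal read-modify-write sketch for this call, assuming svc is an
+// authenticated *storage.Service, the names are placeholders, and
+// Policy/PolicyBindings follow the storage v1 schema. Carrying the
+// fetched policy (and its etag) forward lets the service reject the
+// write if the policy changed in between:
+//
+//   policy, err := svc.Objects.GetIamPolicy("my-bucket", "path/to/object").Do()
+//   if err != nil {
+//     log.Fatal(err)
+//   }
+//   policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
+//     Role:    "roles/storage.objectViewer",
+//     Members: []string{"user:reader@example.com"},
+//   })
+//   _, err = svc.Objects.SetIamPolicy("my-bucket", "path/to/object", policy).Do()
+//   if err != nil {
+//     log.Fatal(err)
+//   }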
+func (c *ObjectsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objects.setIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.testIamPermissions": + +type ObjectsTestIamPermissionsCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given object to +// see which, if any, are held by the caller. +func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { + c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
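+// A minimal usage sketch (illustrative only; assumes svc is an +// authenticated *Service; the bucket, object, and permission names are +// placeholders): +// +// resp, err := svc.Objects.TestIamPermissions("my-bucket", "my-object", +// []string{"storage.objects.get", "storage.objects.delete"}).Do() +// if err != nil { +// // handle the error +// } +// // resp.Permissions lists the subset of the requested permissions that +// // the caller holds.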
+func (c *ObjectsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.objects.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "object", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.update": + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an object's metadata. +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. 
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata.", + // "httpMethod": "PUT", + // "id": "storage.objects.update", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// WatchAll: Watch for changes on all objects in a bucket. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. 
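+// Like the other optional parameters, PageToken chains ahead of Do. A +// minimal sketch (illustrative only; assumes svc is an authenticated +// *Service and channel is a prepared *Channel; names are placeholders): +// +// ch, err := svc.Objects.WatchAll("my-bucket", channel). +// Prefix("logs/"). +// Versions(true). +// Do() +// if err != nil { +// // handle the error +// } +// _ = ch // server-assigned details for the watch channel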
+func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsWatchAllCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// method id "storage.projects.serviceAccount.get": + +type ProjectsServiceAccountGetCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get the email address of this project's Google Cloud Storage +// service account. +func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { + c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsServiceAccountGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.serviceAccount.get" call. +// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServiceAccount.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get the email address of this project's Google Cloud Storage service account.", + // "httpMethod": "GET", + // "id": "storage.projects.serviceAccount.get", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/serviceAccount", + // "response": { + // "$ref": "ServiceAccount" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md new file mode 100644 index 00000000000..ffc29852085 --- /dev/null +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -0,0 +1,90 @@ +# Contributing + +1. Sign one of the contributor license agreements below. +1. Get the package: + + `go get -d google.golang.org/appengine` +1. Change into the checked out source: + + `cd $GOPATH/src/google.golang.org/appengine` +1. Fork the repo. +1. Set your fork as a remote: + + `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git` +1. Make changes, commit to your fork. +1. Send a pull request with your changes. + The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request. + +# Testing + +## Running system tests + +Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. 
+ +Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. + +Run tests with `goapp test`: + +``` +goapp test -v google.golang.org/appengine/... +``` + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your work**, +then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md new file mode 100644 index 00000000000..d86768a2c66 --- /dev/null +++ b/vendor/google.golang.org/appengine/README.md @@ -0,0 +1,73 @@ +# Go App Engine packages + +[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) + +This repository supports the Go runtime on *App Engine standard*. +It provides APIs for interacting with App Engine services. +Its canonical import path is `google.golang.org/appengine`. + +See https://cloud.google.com/appengine/docs/go/ +for more information. + +File issue reports and feature requests on the [GitHub issue +tracker](https://github.com/golang/appengine/issues). + +## Upgrading an App Engine app to the flexible environment + +This package does not work on *App Engine flexible*. + +There are many differences between the App Engine standard environment and +the flexible environment. + +See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading). + +## Directory structure + +The top level directory of this repository is the `appengine` package. It +contains the +basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API +packages are in subdirectories (e.g. `datastore`). + +There is an `internal` subdirectory that contains service protocol buffers, +plus packages required for connectivity to make API calls. App Engine apps +should not directly import any package under `internal`. + +## Updating from legacy (`import "appengine"`) packages + +If you're currently using the bare `appengine` packages +(that is, not these ones, imported via `google.golang.org/appengine`), +then you can use the `aefix` tool to help automate an upgrade to these packages. + +Run `go get google.golang.org/appengine/cmd/aefix` to install it. + +### 1. Update import paths + +The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. +You will need to update your code to use import paths starting with that; for instance, +code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. + +### 2. Update code using deprecated, removed or modified APIs + +Most App Engine services are available with exactly the same API. +A few APIs were cleaned up, and there are some differences: + +* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. +* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. +* `appengine.Datacenter` now takes a `context.Context` argument. +* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. +* `delay.Call` now returns an error. +* `search.FieldLoadSaver` now handles document metadata. +* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the + `context.Context` instead. +* `aetest` no longer declares its own Context type, and uses the standard one instead. +* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been + deprecated and unused for a long time. +* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. + Use `appengine.ModuleHostname` and `appengine.ModuleName` instead. +* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. 
+ Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the + feature you require is not present in the new + [blobstore package](https://google.golang.org/appengine/blobstore). +* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. + Use the standard `net` package instead. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 00000000000..d4f808442b7 --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,113 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package appengine provides basic functionality for Google App Engine. +// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine // import "google.golang.org/appengine" + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The gophers party all night; the rabbits provide the beats. + +// Main is the principal entry point for an app running in App Engine. +// +// On App Engine Flexible it installs a trivial health checker if one isn't +// already registered, and starts listening on port 8080 (overridden by the +// $PORT environment variable). +// +// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// On App Engine Standard it ensures the server has started and is prepared to +// receive requests. +// +// Main never returns. +// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. 
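+// For example, GeoPoint{Lat: 37.42, Lng: -122.08} is valid, while +// GeoPoint{Lat: 91, Lng: 0} is not (latitude out of range).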
+func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. +func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 00000000000..f4b645aad3b --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,20 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package appengine + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// BackgroundContext returns a context not associated with a request. +// This should only be used when not servicing a request. +// This only works in App Engine "flexible environment". +func BackgroundContext() context.Context { + return internal.BackgroundContext() +} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 00000000000..16d0772e2a4 --- /dev/null +++ b/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 00000000000..b8dcf8f3619 --- /dev/null +++ b/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). +func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. "my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. +func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. +func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. 
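+//
+// The Data field below holds the certificate in PEM form; a caller could,
+// for example (hypothetical code, not part of this package), decode it with
+// the standard library:
+//
+//	if block, _ := pem.Decode(cert.Data); block != nil {
+//		parsed, err := x509.ParseCertificate(block.Bytes)
+//		// ...
+//	}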
+type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. +func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). +func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go new file mode 100644 index 00000000000..efee06090fc --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -0,0 +1,677 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" + defaultTicketSuffix = "/default.20150612t184001.0" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. 
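+	// These headers implement the service-bridge request built by c.post
+	// below: the endpoint/method pair routes each POSTed call to the
+	// VMRemoteAPI service, and the deadline header carries the RPC timeout
+	// in seconds.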
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } + + defaultTicketOnce sync.Once + defaultTicket string +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + stopFlushing := make(chan int) + + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. 
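+	// Each stack frame occupies two lines in the trace (the function and
+	// its file:line), hence the skipFrames*2 newline scan below.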
+ const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. + log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +// DefaultTicket returns a ticket used for background context or dev_appserver. +func DefaultTicket() string { + defaultTicketOnce.Do(func() { + if IsDevAppServer() { + defaultTicket = "testapp" + defaultTicketSuffix + return + } + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + }) + return defaultTicket +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. 
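+	// The ticket has the form "escapedAppID/module.majorVersion.instanceID"
+	// (see DefaultTicket above), letting background API calls authenticate
+	// without an in-flight request.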
+ ticket := DefaultTicket() + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) + return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. 
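+		// If the timer fired, CancelRequest made the Do call below fail
+		// with an opaque transport error; surface the canonical errTimeout
+		// instead.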
+ if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. + timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + // Use a test ticket under test environment. + if ticket == "" { + if appid := ctx.Value(&appIDOverrideKey); appid != nil { + ticket = appid.(string) + defaultTicketSuffix + } + } + // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. + if ticket == "" { + ticket = DefaultTicket() + } + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. 
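+	// Messages longer than 8 KB are cut and suffixed with "...(length N)",
+	// where N is the original length, so truncation is visible in the logs.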
+ const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + if c == nil { + panic("not an App Engine context") + } + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) + c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 00000000000..e0c0b214b72 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,123 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "errors" + "os" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var errNotAppEngineContext = errors.New("not an App Engine context") + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. + var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. + f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. + ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + logf(c, level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + return withNamespace(ctx, namespace) +} + +// SetTestEnv sets the env variables for testing background ticket in Flex. +func SetTestEnv() func() { + var environ = []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_MINOR_VERSION", "067924799508853122"}, + {"GAE_MODULE_INSTANCE", "0"}, + {"GAE_MODULE_NAME", "default"}, + {"GAE_MODULE_VERSION", "20150612t184001"}, + } + + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + return func() { // Restore old environment after the test completes. 
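+		// The setup loop above swapped each pre-test value into v.value, so
+		// an empty string here means the variable was previously unset.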
+ for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go new file mode 100644 index 00000000000..11df8c07b53 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id.go @@ -0,0 +1,28 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "strings" +) + +func parseFullAppID(appid string) (partition, domain, displayID string) { + if i := strings.Index(appid, "~"); i != -1 { + partition, appid = appid[:i], appid[i+1:] + } + if i := strings.Index(appid, ":"); i != -1 { + domain, appid = appid[:i], appid[i+1:] + } + return partition, domain, appid +} + +// appID returns "appid" or "domain.com:appid". +func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 00000000000..87d9701b8df --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,296 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto +// DO NOT EDIT! + +/* +Package app_identity is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +It has these top-level messages: + AppIdentityServiceError + SignForAppRequest + SignForAppResponse + GetPublicCertificateForAppRequest + PublicCertificate + GetPublicCertificateForAppResponse + GetServiceAccountNameRequest + GetServiceAccountNameResponse + GetAccessTokenRequest + GetAccessTokenResponse + GetDefaultGcsBucketNameRequest + GetDefaultGcsBucketNameResponse +*/ +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} + +type AppIdentityServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} + +func (m *GetAccessTokenRequest) 
GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto new file mode 100644 index 00000000000..19610ca5b75 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "app_identity"; + +package appengine; + +message AppIdentityServiceError { + enum ErrorCode { + SUCCESS = 0; + UNKNOWN_SCOPE = 9; + BLOB_TOO_LARGE = 1000; + DEADLINE_EXCEEDED = 1001; + NOT_A_VALID_APP = 1002; + UNKNOWN_ERROR = 1003; + NOT_ALLOWED = 1005; + NOT_IMPLEMENTED = 1006; + } +} + +message SignForAppRequest { + optional bytes bytes_to_sign = 1; +} + +message SignForAppResponse { + optional string key_name = 1; + optional bytes signature_bytes = 2; +} + +message GetPublicCertificateForAppRequest { +} + +message PublicCertificate { + optional string key_name = 1; + optional string x509_certificate_pem = 2; +} + +message GetPublicCertificateForAppResponse { + repeated PublicCertificate public_certificate_list = 1; + optional int64 max_client_cache_time_in_second = 2; +} + +message GetServiceAccountNameRequest { +} + +message GetServiceAccountNameResponse { + optional string service_account_name = 1; +} + +message GetAccessTokenRequest { + 
repeated string scope = 1; + optional int64 service_account_id = 2; + optional string service_account_name = 3; +} + +message GetAccessTokenResponse { + optional string access_token = 1; + optional int64 expiration_time = 2; +} + +message GetDefaultGcsBucketNameRequest { +} + +message GetDefaultGcsBucketNameResponse { + optional string default_gcs_bucket_name = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 00000000000..36a195650a9 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/base/api_base.proto +// DO NOT EDIT! + +/* +Package base is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/base/api_base.proto + +It has these top-level messages: + StringProto + Integer32Proto + Integer64Proto + BoolProto + DoubleProto + BytesProto + VoidProto +*/ +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} + +func (m *Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } +func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte 
`protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto new file mode 100644 index 00000000000..56cd7a3cad0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto @@ -0,0 +1,33 @@ +// Built-in base types for API calls. Primarily useful as return types. + +syntax = "proto2"; +option go_package = "base"; + +package appengine.base; + +message StringProto { + required string value = 1; +} + +message Integer32Proto { + required int32 value = 1; +} + +message Integer64Proto { + required int64 value = 1; +} + +message BoolProto { + required bool value = 1; +} + +message DoubleProto { + required double value = 1; +} + +message BytesProto { + required bytes value = 1 [ctype=CORD]; +} + +message VoidProto { +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go new file mode 100644 index 00000000000..8613cb7311a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go @@ -0,0 +1,2778 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/datastore/datastore_v3.proto + +It has these top-level messages: + Action + PropertyValue + Property + Path + Reference + User + EntityProto + CompositeProperty + Index + CompositeIndex + IndexPostfix + IndexPosition + Snapshot + InternalHeader + Transaction + Query + CompiledQuery + CompiledCursor + Cursor + Error + Cost + GetRequest + GetResponse + PutRequest + PutResponse + TouchRequest + TouchResponse + DeleteRequest + DeleteResponse + NextRequest + QueryResult + AllocateIdsRequest + AllocateIdsResponse + CompositeIndices + AddActionsRequest + AddActionsResponse + BeginTransactionRequest + CommitResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: "ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} +var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} + +type EntityProto_Kind int32 + +const ( + EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + 
"GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State = 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} + +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + *p = x + return p +} 
+func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} + +type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() *Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + 
"CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = PutRequest_AutoIdPolicy(value) + return nil +} + +type Action struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = PropertyValue{} } +func (m *PropertyValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} + +func (m *PropertyValue) GetInt64Value() int64 { + if m != nil && m.Int64Value != nil { + return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 
`protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) 
Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + +func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} + +func (m *Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + 
Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) ProtoMessage() {} + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityProto) Reset() { *m = 
EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} + +func (m *EntityProto) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + +type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" 
json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} + +const Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return 
*m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func (*InternalHeader) ProtoMessage() {} + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` + SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` + Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` + RequirePerfectPlan *bool 
`protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false +const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && 
m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name" 
json:"property_name,omitempty"` + DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` + EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + 
return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor) ProtoMessage() {} + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool 
`protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) 
ProtoMessage() {} + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" 
json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return 
Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + return Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" 
json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" 
json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) 
GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} + +type BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m 
*BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto new file mode 100755 index 00000000000..e76f126ff7c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto @@ -0,0 +1,541 @@ +syntax = "proto2"; +option go_package = "datastore"; + +package appengine; + +message Action{} + +message PropertyValue { + optional int64 int64Value = 1; + optional bool booleanValue = 2; + optional string stringValue = 3; + optional double doubleValue = 4; + + optional group PointValue = 5 { + required double x = 6; + required double y = 7; + } + + optional group UserValue = 8 { + required string email = 9; + required string auth_domain = 10; + optional string nickname = 11; + optional string federated_identity = 21; + optional string federated_provider = 22; + } + + optional group ReferenceValue = 12 { + required string app = 13; + optional string name_space = 20; + repeated group PathElement = 14 { + required string type = 15; + optional int64 id = 16; + optional string name = 17; + } + } +} + +message Property { + enum Meaning { + NO_MEANING = 0; + BLOB = 14; + TEXT = 15; + BYTESTRING = 16; + + ATOM_CATEGORY = 1; + ATOM_LINK = 2; + ATOM_TITLE = 3; + ATOM_CONTENT = 4; + ATOM_SUMMARY = 5; + ATOM_AUTHOR = 6; + + GD_WHEN = 7; + GD_EMAIL = 8; + GEORSS_POINT = 9; + GD_IM = 10; + + GD_PHONENUMBER = 11; + GD_POSTALADDRESS = 12; + + GD_RATING = 13; + + BLOBKEY = 17; + ENTITY_PROTO = 19; + + INDEX_VALUE = 18; + }; + + optional Meaning meaning = 1 [default = NO_MEANING]; + optional string meaning_uri = 2; + + required string name = 3; + + required PropertyValue value = 5; + + required bool multiple = 4; + + optional bool searchable = 6 [default=false]; + + enum FtsTokenizationOption { + HTML = 1; + ATOM = 2; + } + + optional FtsTokenizationOption fts_tokenization_option = 8; + + optional string locale = 9 
[default = "en"]; +} + +message Path { + repeated group Element = 1 { + required string type = 2; + optional int64 id = 3; + optional string name = 4; + } +} + +message Reference { + required string app = 13; + optional string name_space = 20; + required Path path = 14; +} + +message User { + required string email = 1; + required string auth_domain = 2; + optional string nickname = 3; + optional string federated_identity = 6; + optional string federated_provider = 7; +} + +message EntityProto { + required Reference key = 13; + required Path entity_group = 16; + optional User owner = 17; + + enum Kind { + GD_CONTACT = 1; + GD_EVENT = 2; + GD_MESSAGE = 3; + } + optional Kind kind = 4; + optional string kind_uri = 5; + + repeated Property property = 14; + repeated Property raw_property = 15; + + optional int32 rank = 18; +} + +message CompositeProperty { + required int64 index_id = 1; + repeated string value = 2; +} + +message Index { + required string entity_type = 1; + required bool ancestor = 5; + repeated group Property = 2 { + required string name = 3; + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + optional Direction direction = 4 [default = ASCENDING]; + } +} + +message CompositeIndex { + required string app_id = 1; + required int64 id = 2; + required Index definition = 3; + + enum State { + WRITE_ONLY = 1; + READ_WRITE = 2; + DELETED = 3; + ERROR = 4; + } + required State state = 4; + + optional bool only_use_if_required = 6 [default = false]; +} + +message IndexPostfix { + message IndexValue { + required string property_name = 1; + required PropertyValue value = 2; + } + + repeated IndexValue index_value = 1; + + optional Reference key = 2; + + optional bool before = 3 [default=true]; +} + +message IndexPosition { + optional string key = 1; + + optional bool before = 2 [default=true]; +} + +message Snapshot { + enum Status { + INACTIVE = 0; + ACTIVE = 1; + } + + required int64 ts = 1; +} + +message InternalHeader { + optional string qos = 1; +} + +message Transaction { + optional InternalHeader header = 4; + required fixed64 handle = 1; + required string app = 2; + optional bool mark_changes = 3 [default = false]; +} + +message Query { + optional InternalHeader header = 39; + + required string app = 1; + optional string name_space = 29; + + optional string kind = 3; + optional Reference ancestor = 17; + + repeated group Filter = 4 { + enum Operator { + LESS_THAN = 1; + LESS_THAN_OR_EQUAL = 2; + GREATER_THAN = 3; + GREATER_THAN_OR_EQUAL = 4; + EQUAL = 5; + IN = 6; + EXISTS = 7; + } + + required Operator op = 6; + repeated Property property = 14; + } + + optional string search_query = 8; + + repeated group Order = 9 { + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + + required string property = 10; + optional Direction direction = 11 [default = ASCENDING]; + } + + enum Hint { + ORDER_FIRST = 1; + ANCESTOR_FIRST = 2; + FILTER_FIRST = 3; + } + optional Hint hint = 18; + + optional int32 count = 23; + + optional int32 offset = 12 [default = 0]; + + optional int32 limit = 16; + + optional CompiledCursor compiled_cursor = 30; + optional CompiledCursor end_compiled_cursor = 31; + + repeated CompositeIndex composite_index = 19; + + optional bool require_perfect_plan = 20 [default = false]; + + optional bool keys_only = 21 [default = false]; + + optional Transaction transaction = 22; + + optional bool compile = 25 [default = false]; + + optional int64 failover_ms = 26; + + optional bool strong = 32; + + repeated string property_name = 33; + + repeated string 
group_by_property_name = 34; + + optional bool distinct = 24; + + optional int64 min_safe_time_seconds = 35; + + repeated string safe_replica_name = 36; + + optional bool persist_offset = 37 [default=false]; +} + +message CompiledQuery { + required group PrimaryScan = 1 { + optional string index_name = 2; + + optional string start_key = 3; + optional bool start_inclusive = 4; + optional string end_key = 5; + optional bool end_inclusive = 6; + + repeated string start_postfix_value = 22; + repeated string end_postfix_value = 23; + + optional int64 end_unapplied_log_timestamp_us = 19; + } + + repeated group MergeJoinScan = 7 { + required string index_name = 8; + + repeated string prefix_value = 9; + + optional bool value_prefix = 20 [default=false]; + } + + optional Index index_def = 21; + + optional int32 offset = 10 [default = 0]; + + optional int32 limit = 11; + + required bool keys_only = 12; + + repeated string property_name = 24; + + optional int32 distinct_infix_size = 25; + + optional group EntityFilter = 13 { + optional bool distinct = 14 [default=false]; + + optional string kind = 17; + optional Reference ancestor = 18; + } +} + +message CompiledCursor { + optional group Position = 2 { + optional string start_key = 27; + + repeated group IndexValue = 29 { + optional string property = 30; + required PropertyValue value = 31; + } + + optional Reference key = 32; + + optional bool start_inclusive = 28 [default=true]; + } +} + +message Cursor { + required fixed64 cursor = 1; + + optional string app = 2; +} + +message Error { + enum ErrorCode { + BAD_REQUEST = 1; + CONCURRENT_TRANSACTION = 2; + INTERNAL_ERROR = 3; + NEED_INDEX = 4; + TIMEOUT = 5; + PERMISSION_DENIED = 6; + BIGTABLE_ERROR = 7; + COMMITTED_BUT_STILL_APPLYING = 8; + CAPABILITY_DISABLED = 9; + TRY_ALTERNATE_BACKEND = 10; + SAFE_TIME_TOO_OLD = 11; + } +} + +message Cost { + optional int32 index_writes = 1; + optional int32 index_write_bytes = 2; + optional int32 entity_writes = 3; + optional int32 entity_write_bytes = 4; + optional group CommitCost = 5 { + optional int32 requested_entity_puts = 6; + optional int32 requested_entity_deletes = 7; + }; + optional int32 approximate_storage_delta = 8; + optional int32 id_sequence_updates = 9; +} + +message GetRequest { + optional InternalHeader header = 6; + + repeated Reference key = 1; + optional Transaction transaction = 2; + + optional int64 failover_ms = 3; + + optional bool strong = 4; + + optional bool allow_deferred = 5 [default=false]; +} + +message GetResponse { + repeated group Entity = 1 { + optional EntityProto entity = 2; + optional Reference key = 4; + + optional int64 version = 3; + } + + repeated Reference deferred = 5; + + optional bool in_order = 6 [default=true]; +} + +message PutRequest { + optional InternalHeader header = 11; + + repeated EntityProto entity = 1; + optional Transaction transaction = 2; + repeated CompositeIndex composite_index = 3; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; + + enum AutoIdPolicy { + CURRENT = 0; + SEQUENTIAL = 1; + } + optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; +} + +message PutResponse { + repeated Reference key = 1; + optional Cost cost = 2; + repeated int64 version = 3; +} + +message TouchRequest { + optional InternalHeader header = 10; + + repeated Reference key = 1; + repeated CompositeIndex composite_index = 2; + optional bool force = 3 [default = false]; + 
repeated Snapshot snapshot = 9; +} + +message TouchResponse { + optional Cost cost = 1; +} + +message DeleteRequest { + optional InternalHeader header = 10; + + repeated Reference key = 6; + optional Transaction transaction = 5; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; +} + +message DeleteResponse { + optional Cost cost = 1; + repeated int64 version = 3; +} + +message NextRequest { + optional InternalHeader header = 5; + + required Cursor cursor = 1; + optional int32 count = 2; + + optional int32 offset = 4 [default = 0]; + + optional bool compile = 3 [default = false]; +} + +message QueryResult { + optional Cursor cursor = 1; + + repeated EntityProto result = 2; + + optional int32 skipped_results = 7; + + required bool more_results = 3; + + optional bool keys_only = 4; + + optional bool index_only = 9; + + optional bool small_ops = 10; + + optional CompiledQuery compiled_query = 5; + + optional CompiledCursor compiled_cursor = 6; + + repeated CompositeIndex index = 8; + + repeated int64 version = 11; +} + +message AllocateIdsRequest { + optional InternalHeader header = 4; + + optional Reference model_key = 1; + + optional int64 size = 2; + + optional int64 max = 3; + + repeated Reference reserve = 5; +} + +message AllocateIdsResponse { + required int64 start = 1; + required int64 end = 2; + optional Cost cost = 3; +} + +message CompositeIndices { + repeated CompositeIndex index = 1; +} + +message AddActionsRequest { + optional InternalHeader header = 3; + + required Transaction transaction = 1; + repeated Action action = 2; +} + +message AddActionsResponse { +} + +message BeginTransactionRequest { + optional InternalHeader header = 3; + + required string app = 1; + optional bool allow_multiple_eg = 2 [default = false]; +} + +message CommitResponse { + optional Cost cost = 1; + + repeated group Version = 3 { + required Reference root_entity_key = 4; + required int64 version = 5; + } +} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go new file mode 100644 index 00000000000..d538701ab3b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -0,0 +1,14 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import netcontext "golang.org/x/net/context" + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +func AppID(c netcontext.Context) string { + return appID(FullyQualifiedAppID(c)) +} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go new file mode 100644 index 00000000000..d5fa75be78e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "net/http" + "os" + + netcontext "golang.org/x/net/context" +) + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. 
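The generated files in this vendor drop all follow the same proto2 accessor convention, visible throughout datastore_v3.pb.go above: every optional scalar becomes a Go pointer, each `[default = ...]` in the .proto becomes a `Default_<Message>_<Field>` constant, and each getter nil-checks both the receiver and the field before falling back to that constant. A minimal sketch of the shape, using a hypothetical `Example` message rather than one taken from this patch:

```go
package example

// Example mirrors what protoc-gen-go emits for a proto2 message with
// `optional bool force = 7 [default = false];`. The message and field
// names here are illustrative, not from this vendor drop.
type Example struct {
	Force            *bool  `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

// One constant per [default = ...] field, matching the generated code above.
const Default_Example_Force bool = false

// GetForce is nil-safe on both the receiver and the field, so an unset
// field (or a nil message) yields the declared default.
func (m *Example) GetForce() bool {
	if m != nil && m.Force != nil {
		return *m.Force
	}
	return Default_Example_Force
}
```

Because the getter tolerates a nil receiver, callers can chain accessors such as `m.GetForce()` without guarding each step.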
+ +const ( + hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" + hRequestLogId = "X-AppEngine-Request-Log-Id" + hDatacenter = "X-AppEngine-Datacenter" +) + +func ctxHeaders(ctx netcontext.Context) http.Header { + c := fromContext(ctx) + if c == nil { + return nil + } + return c.Request().Header +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDefaultVersionHostname) +} + +func RequestID(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hRequestLogId) +} + +func Datacenter(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDatacenter) +} + +func ServerSoftware() string { + // TODO(dsymonds): Remove fallback when we've verified this. + if s := os.Getenv("SERVER_SOFTWARE"); s != "" { + return s + } + return "Google App Engine/1.x.x" +} + +// TODO(dsymonds): Remove the metadata fetches. + +func ModuleName(_ netcontext.Context) string { + if s := os.Getenv("GAE_MODULE_NAME"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_name")) +} + +func VersionID(_ netcontext.Context) string { + if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) +} + +func InstanceID() string { + if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_instance")) +} + +func partitionlessAppID() string { + // gae_project has everything except the partition prefix. + appID := os.Getenv("GAE_LONG_APP_ID") + if appID == "" { + appID = string(mustGetMetadata("instance/attributes/gae_project")) + } + return appID +} + +func fullyQualifiedAppID(_ netcontext.Context) string { + appID := partitionlessAppID() + + part := os.Getenv("GAE_PARTITION") + if part == "" { + part = string(mustGetMetadata("instance/attributes/gae_partition")) + } + + if part != "" { + appID = part + "~" + appID + } + return appID +} + +func IsDevAppServer() bool { + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" +} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 00000000000..051ea3980ab --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,110 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. +// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. +func RegisterErrorCodeMap(service string, m map[int32]string) { + errorCodeMaps[service] = m +} + +type timeoutCodeKey struct { + service string + code int32 +} + +// timeoutCodes is the set of service+code pairs that represent timeouts. 
+var timeoutCodes = make(map[timeoutCodeKey]bool) + +func RegisterTimeoutErrorCode(service string, code int32) { + timeoutCodes[timeoutCodeKey{service, code}] = true +} + +// APIError is the type returned by appengine.Context's Call method +// when an API call fails in an API-specific way. This may be, for instance, +// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. +type APIError struct { + Service string + Detail string + Code int32 // API-specific error code +} + +func (e *APIError) Error() string { + if e.Code == 0 { + if e.Detail == "" { + return "APIError <empty>" + } + return e.Detail + } + s := fmt.Sprintf("API error %d", e.Code) + if m, ok := errorCodeMaps[e.Service]; ok { + s += " (" + e.Service + ": " + m[e.Code] + ")" + } else { + // Shouldn't happen, but provide a bit more detail if it does. + s = e.Service + " " + s + } + if e.Detail != "" { + s += ": " + e.Detail + } + return s +} + +func (e *APIError) IsTimeout() bool { + return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] +} + +// CallError is the type returned by appengine.Context's Call method when an +// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. +type CallError struct { + Detail string + Code int32 + // TODO: Remove this if we get a distinguishable error code. + Timeout bool +} + +func (e *CallError) Error() string { + var msg string + switch remotepb.RpcError_ErrorCode(e.Code) { + case remotepb.RpcError_UNKNOWN: + return e.Detail + case remotepb.RpcError_OVER_QUOTA: + msg = "Over quota" + case remotepb.RpcError_CAPABILITY_DISABLED: + msg = "Capability disabled" + case remotepb.RpcError_CANCELLED: + msg = "Canceled" + default: + msg = fmt.Sprintf("Call error %d", e.Code) + } + s := msg + ": " + e.Detail + if e.Timeout { + s += " (timeout)" + } + return s +} + +func (e *CallError) IsTimeout() bool { + return e.Timeout +} + +// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. +// The function should be prepared to be called on the same message more than once; it should only modify the +// RPC request the first time. +var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go new file mode 100644 index 00000000000..20c595be30a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go @@ -0,0 +1,899 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/log/log_service.proto +// DO NOT EDIT! + +/* +Package log is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/log/log_service.proto + +It has these top-level messages: + LogServiceError + UserAppLogLine + UserAppLogGroup + FlushRequest + SetStatusRequest + LogOffset + LogLine + RequestLog + LogModuleVersion + LogReadRequest + LogReadResponse + LogUsageRecord + LogUsageRequest + LogUsageResponse +*/ +package log + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used.
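Before the log service bindings, note how internal.go just above classifies timeouts: API packages register (service, code) pairs through `RegisterTimeoutErrorCode`, and `APIError.IsTimeout` answers from that registry, while the generic `CallError` carries an explicit `Timeout` flag instead. A hedged sketch of a caller, assuming it sits in package `internal`; the `"datastore_v3"` name and code 5 mirror the `TIMEOUT = 5` entry of the datastore `Error.ErrorCode` enum earlier in this patch, and `isTimeoutErr` itself is invented for illustration:

```go
package internal

// Hypothetical registration: mark datastore's TIMEOUT code (5 in the
// Error.ErrorCode enum earlier in this patch) as a timeout. Real API
// packages do this from their own init functions.
func init() {
	RegisterTimeoutErrorCode("datastore_v3", 5)
}

// isTimeoutErr is an invented helper showing how the two error types
// from internal.go report timeouts to callers that want to retry.
func isTimeoutErr(err error) bool {
	switch e := err.(type) {
	case *APIError:
		return e.IsTimeout() // consults the registered (service, code) set
	case *CallError:
		return e.Timeout // generic RPC failures carry an explicit flag
	}
	return false
}
```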
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} + +type LogServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) ProtoMessage() {} + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} + +func (m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + 
return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` + PendingTime *int64 
`protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` + WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return *m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil 
&& m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m *RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + +func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + 
if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` + IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func (*LogReadRequest) ProtoMessage() {} + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return 
"" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string 
`protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto new file mode 100644 index 00000000000..8981dc47577 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto @@ -0,0 +1,150 @@ +syntax = "proto2"; +option go_package = "log"; + +package appengine; + +message LogServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + STORAGE_ERROR = 2; + } +} + +message UserAppLogLine { + required int64 timestamp_usec = 1; + required int64 level = 2; + required string message = 3; +} + +message UserAppLogGroup { + repeated UserAppLogLine log_line = 2; +} + +message FlushRequest { + optional bytes logs = 1; 
+} + +message SetStatusRequest { + required string status = 1; +} + + +message LogOffset { + optional bytes request_id = 1; +} + +message LogLine { + required int64 time = 1; + required int32 level = 2; + required string log_message = 3; +} + +message RequestLog { + required string app_id = 1; + optional string module_id = 37 [default="default"]; + required string version_id = 2; + required bytes request_id = 3; + optional LogOffset offset = 35; + required string ip = 4; + optional string nickname = 5; + required int64 start_time = 6; + required int64 end_time = 7; + required int64 latency = 8; + required int64 mcycles = 9; + required string method = 10; + required string resource = 11; + required string http_version = 12; + required int32 status = 13; + required int64 response_size = 14; + optional string referrer = 15; + optional string user_agent = 16; + required string url_map_entry = 17; + required string combined = 18; + optional int64 api_mcycles = 19; + optional string host = 20; + optional double cost = 21; + + optional string task_queue_name = 22; + optional string task_name = 23; + + optional bool was_loading_request = 24; + optional int64 pending_time = 25; + optional int32 replica_index = 26 [default = -1]; + optional bool finished = 27 [default = true]; + optional bytes clone_key = 28; + + repeated LogLine line = 29; + + optional bool lines_incomplete = 36; + optional bytes app_engine_release = 38; + + optional int32 exit_reason = 30; + optional bool was_throttled_for_time = 31; + optional bool was_throttled_for_requests = 32; + optional int64 throttled_time = 33; + + optional bytes server_name = 34; +} + +message LogModuleVersion { + optional string module_id = 1 [default="default"]; + optional string version_id = 2; +} + +message LogReadRequest { + required string app_id = 1; + repeated string version_id = 2; + repeated LogModuleVersion module_version = 19; + + optional int64 start_time = 3; + optional int64 end_time = 4; + optional LogOffset offset = 5; + repeated bytes request_id = 6; + + optional int32 minimum_log_level = 7; + optional bool include_incomplete = 8; + optional int64 count = 9; + + optional string combined_log_regex = 14; + optional string host_regex = 15; + optional int32 replica_index = 16; + + optional bool include_app_logs = 10; + optional int32 app_logs_per_request = 17; + optional bool include_host = 11; + optional bool include_all = 12; + optional bool cache_iterator = 13; + optional int32 num_shards = 18; +} + +message LogReadResponse { + repeated RequestLog log = 1; + optional LogOffset offset = 2; + optional int64 last_end_time = 3; +} + +message LogUsageRecord { + optional string version_id = 1; + optional int32 start_time = 2; + optional int32 end_time = 3; + optional int64 count = 4; + optional int64 total_size = 5; + optional int32 records = 6; +} + +message LogUsageRequest { + required string app_id = 1; + repeated string version_id = 2; + optional int32 start_time = 3; + optional int32 end_time = 4; + optional uint32 resolution_hours = 5 [default = 1]; + optional bool combine_versions = 6; + optional int32 usage_version = 7; + optional bool versions_only = 8; +} + +message LogUsageResponse { + repeated LogUsageRecord usage = 1; + optional LogUsageRecord summary = 2; +} diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go new file mode 100644 index 00000000000..822e784a458 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -0,0 +1,48 @@ +// 
Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "log" + "net/http" + "net/url" + "os" +) + +func Main() { + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + host := "" + if IsDevAppServer() { + host = "127.0.0.1" + } + if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 00000000000..9cc1f71d104 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,61 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? +func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + log.Fatalf("Metadata fetch failed: %v", err) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. + req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 00000000000..a0145ed317c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/modules/modules_service.proto +// DO NOT EDIT! + +/* +Package modules is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/modules/modules_service.proto + +It has these top-level messages: + ModulesServiceError + GetModulesRequest + GetModulesResponse + GetVersionsRequest + GetVersionsResponse + GetDefaultVersionRequest + GetDefaultVersionResponse + GetNumInstancesRequest + GetNumInstancesResponse + SetNumInstancesRequest + SetNumInstancesResponse + StartModuleRequest + StartModuleResponse + StopModuleRequest + StopModuleResponse + GetHostnameRequest + GetHostnameResponse +*/ +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} + +type ModulesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} + +type GetModulesRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func 
(*GetVersionsRequest) ProtoMessage() {} + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module 
!= nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" 
json:"hostname,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto new file mode 100644 index 00000000000..d29f0065a2f --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto @@ -0,0 +1,80 @@ +syntax = "proto2"; +option go_package = "modules"; + +package appengine; + +message ModulesServiceError { + enum ErrorCode { + OK = 0; + INVALID_MODULE = 1; + INVALID_VERSION = 2; + INVALID_INSTANCES = 3; + TRANSIENT_ERROR = 4; + UNEXPECTED_STATE = 5; + } +} + +message GetModulesRequest { +} + +message GetModulesResponse { + repeated string module = 1; +} + +message GetVersionsRequest { + optional string module = 1; +} + +message GetVersionsResponse { + repeated string version = 1; +} + +message GetDefaultVersionRequest { + optional string module = 1; +} + +message GetDefaultVersionResponse { + required string version = 1; +} + +message GetNumInstancesRequest { + optional string module = 1; + optional string version = 2; +} + +message GetNumInstancesResponse { + required int64 instances = 1; +} + +message SetNumInstancesRequest { + optional string module = 1; + optional string version = 2; + required int64 instances = 3; +} + +message SetNumInstancesResponse {} + +message StartModuleRequest { + required string module = 1; + required string version = 2; +} + +message StartModuleResponse {} + +message StopModuleRequest { + optional string module = 1; + optional string version = 2; +} + +message StopModuleResponse {} + +message GetHostnameRequest { + optional string module = 1; + optional string version = 2; + optional string instance = 3; +} + +message GetHostnameResponse { + required string hostname = 1; +} + diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 00000000000..3b94cf0c6a8 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. 
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh new file mode 100755 index 00000000000..2fdb546a633 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/regen.sh @@ -0,0 +1,40 @@ +#!/bin/bash -e +# +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. + +PKG=google.golang.org/appengine + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +# Run protoc once per package. +for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done + +for f in $(find $PKG/internal -name '*.pb.go'); do + # Remove proto.RegisterEnum calls. + # These cause duplicate registration panics when these packages + # are used on classic App Engine. proto.RegisterEnum only affects + # parsing the text format; we don't care about that. + # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 + sed -i '/proto.RegisterEnum/d' $f +done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 00000000000..526bd39e6d1 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto +// DO NOT EDIT! + +/* +Package remote_api is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/remote_api/remote_api.proto + +It has these top-level messages: + Request + ApplicationError + RpcError + Response +*/ +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} + +type Request struct { + ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail 
!= nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) ProtoMessage() {} + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto new file mode 100644 index 00000000000..f21763a4e23 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto @@ -0,0 +1,44 @@ +syntax = "proto2"; +option go_package = "remote_api"; + +package remote_api; + +message Request { + required string service_name = 2; + required string method = 3; + required bytes request = 4; + optional string request_id = 5; +} + +message ApplicationError { + required int32 code = 1; + required string detail = 2; +} + +message RpcError { + enum ErrorCode { + UNKNOWN = 0; + CALL_NOT_FOUND = 1; + PARSE_ERROR = 2; + SECURITY_VIOLATION = 3; + OVER_QUOTA = 4; + REQUEST_TOO_LARGE = 5; + CAPABILITY_DISABLED = 6; + FEATURE_DISABLED = 7; + BAD_REQUEST = 8; + RESPONSE_TOO_LARGE = 9; + CANCELLED = 10; + REPLAY_ERROR = 11; + DEADLINE_EXCEEDED = 12; + } + required int32 code = 1; + optional string detail = 2; +} + +message Response { + optional bytes response = 1; + optional bytes exception = 2; + optional ApplicationError application_error = 3; + optional bytes java_exception = 4; + optional RpcError rpc_error = 5; +} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 00000000000..28a6d181206 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,107 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { + if transactionFromContext(c) != nil { + return errors.New("nested transactions are not supported") + } + + // Begin the transaction. + t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return err + } + t.finished = true + + // Commit the transaction. + res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ + return ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return ErrConcurrentTransaction + } + } + return err +} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 00000000000..21860ca0822 --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 00000000000..05642a992a3 --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 00000000000..8867ae78129 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +/* +Package status is a generated protocol buffer package. + +It is generated from these files: + google/rpc/status.proto + +It has these top-level messages: + Status +*/ +package status + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. 
+ Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*google_protobuf.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 00000000000..e491a9e7f78 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 00000000000..a5c6e06e255 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. 
We often receive PRs that try to fix several things at a time; when only one of the fixes is acceptable, nothing gets merged, and both the author's and the reviewers' time is wasted. Create more PRs to address different concerns and everyone will be happy.
+
+- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a GitHub issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments; otherwise the PR will be closed after 2-3 weeks of inactivity.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**. PRs with a messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in the latest changes from master (but avoid rebasing in the middle of a code review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
+
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 00000000000..39606b564a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,45 @@ +all: test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: testdeps + go test -cpu 1,4 google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... 
+ +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + test \ + testrace \ + clean \ + coverage diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 00000000000..118327bb17a --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,46 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. + +Installation +------------ + +To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: + +``` +$ go get -u google.golang.org/grpc +``` + +Prerequisites +------------- + +This requires Go 1.6 or later. Go 1.7 will be required as of the next gRPC-Go +release (1.8). + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + +Documentation +------------- +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). + +Performance +----------- +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + +Status +------ +General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). + +FAQ +--- + +#### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update proto package, gRPC package and rebuild the proto files: + - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` + - `go get -u google.golang.org/grpc` + - `protoc --go_out=plugins=grpc:. *.proto` diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000000..c40facce510 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
+var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, +} + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. +// +// This is unexported until the gRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current gRPC specification for backoff. If + // gRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. + jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) time.Duration { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 00000000000..ab65049ddc1 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,408 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. 
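An aside on the backoff logic vendored above: `BackoffConfig.backoff` grows the delay geometrically from `baseDelay` by `factor`, caps it at `MaxDelay`, then applies a ±`jitter` randomization. A minimal standalone sketch of the same computation, for illustration only (not part of this diff):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the vendored BackoffConfig.backoff above: exponential
// growth capped at maxDelay, then +/- jitter.
func backoff(retries int, baseDelay, maxDelay time.Duration, factor, jitter float64) time.Duration {
	if retries == 0 {
		return baseDelay
	}
	d, max := float64(baseDelay), float64(maxDelay)
	for d < max && retries > 0 {
		d *= factor
		retries--
	}
	if d > max {
		d = max
	}
	d *= 1 + jitter*(rand.Float64()*2-1)
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	// With the defaults (1s base, 120s cap, factor 1.6, jitter 0.2) the
	// pre-jitter sequence is 1s, 1.6s, 2.56s, ... converging on 120s.
	for i := 0; i <= 8; i++ {
		fmt.Printf("retry %d: %v\n", i, backoff(i, time.Second, 120*time.Second, 1.6, 0.2))
	}
}
```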
+type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. 
If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// downErr implements net.Error. It is constructed by gRPC internals and passed to the down +// call of Balancer. +type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. 
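The `watchAddrUpdates` loop above consumes `naming.Update` values from a `naming.Watcher`. For orientation, here is a hypothetical static resolver that feeds a fixed address list into this machinery; a sketch against the `naming` interfaces, not part of this diff:

```go
package main

import (
	"errors"

	"google.golang.org/grpc/naming"
)

var errWatcherClosed = errors.New("watcher closed")

// staticResolver hands out a watcher over a fixed address list; a
// hypothetical stand-in for DNS or another discovery source.
type staticResolver struct{ addrs []string }

func (r *staticResolver) Resolve(target string) (naming.Watcher, error) {
	return &staticWatcher{addrs: r.addrs, done: make(chan struct{})}, nil
}

type staticWatcher struct {
	addrs []string
	sent  bool
	done  chan struct{}
}

// Next returns one Add update per address on the first call, then blocks
// until Close; watchAddrUpdates above folds these updates into rr.addrs.
func (w *staticWatcher) Next() ([]*naming.Update, error) {
	if !w.sent {
		w.sent = true
		updates := make([]*naming.Update, 0, len(w.addrs))
		for _, a := range w.addrs {
			updates = append(updates, &naming.Update{Op: naming.Add, Addr: a})
		}
		return updates, nil
	}
	<-w.done
	return nil, errWatcherClosed
}

func (w *staticWatcher) Close() { close(w.done) }

func main() {
	var _ naming.Resolver = &staticResolver{} // compile-time interface check
}
```

Assuming this vintage of the API, such a resolver would be handed to `grpc.RoundRobin(...)` and then to `grpc.Dial` via the v1 `grpc.WithBalancer(...)` option.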
+ rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is the only one connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated over all the possible addresses but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated over all the possible addresses but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport().
+type pickFirst struct { + *roundRobin +} + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000000..2ce9a346ae8 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,203 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "errors" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/resolver" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. +// b.Name will be used as the name registered with this builder. +func Register(b Builder) { + m[b.Name()] = b } + +// Get returns the balancer builder registered with the given name. +// If no builder is registered with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[name]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will be gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct{} + +// ClientConn represents a gRPC ClientConn. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shut down.
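A few lines up, `Register` and `Get` implement a simple name-keyed builder registry. The intended pattern is registration from an `init` function, which is why files later in this diff blank-import `balancer/roundrobin` purely for that side effect. A hypothetical sketch of the pattern (not part of this diff):

```go
package main

import "google.golang.org/grpc/balancer"

// noopBuilder is a stand-in used only to show the registration pattern;
// a real builder returns a working Balancer wired to the given ClientConn.
type noopBuilder struct{}

func (noopBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return nil // a real implementation constructs and returns a Balancer
}

func (noopBuilder) Name() string { return "noop" }

// Registration usually happens in init, so merely importing the package
// makes the builder available by name.
func init() {
	balancer.Register(noopBuilder{})
}

func main() {
	b := balancer.Get("noop") // look up by the builder's Name()
	_ = b
}
```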
+ RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // Target returns the dial target for this ClientConn. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// PickOptions contains additional information for the Pick operation. +type PickOptions struct{} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This function is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConns + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // this call until a new picker is updated and will call pick on the new picker. + // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error.
+ // + // The returned done() function will be called once the RPC has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There is no such guarantee for picker.Pick; it may be called at any time. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 00000000000..99e71cd390e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,241 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, so users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// newBuilder creates a new roundrobin balancer builder.
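The `Picker` contract above boils down to: return a READY SubConn, return `ErrNoSubConnAvailable` while progress is still possible, or return an error that fails the RPC. A minimal conforming picker, sketched against these interfaces (not part of this diff):

```go
package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
)

// onePicker serves every RPC from a single SubConn. A balancer would build
// a fresh one of these and publish it via UpdateBalancerState whenever its
// internal state changes.
type onePicker struct {
	sc balancer.SubConn // nil until the balancer has a READY SubConn
}

func (p *onePicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if p.sc == nil {
		// "No connection yet, but keep waiting": wait-for-ready RPCs block
		// until the balancer publishes a new picker.
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	// done is nil: this picker doesn't collect per-RPC stats.
	return p.sc, nil, nil
}

func main() {
	var _ balancer.Picker = &onePicker{} // compile-time interface check
}
```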
+func newBuilder() balancer.Builder { + return &rrBuilder{} +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrBuilder struct{} + +func (*rrBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &rrBalancer{ + cc: cc, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: newPicker([]balancer.SubConn{}, nil), + } +} + +func (*rrBuilder) Name() string { + return "roundrobin" +} + +type rrBalancer struct { + cc balancer.ClientConn + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker *picker +} + +func (b *rrBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("roundrobin.rrBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("roundrobin.rrBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("roundrobin.rrBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - or does round robin selection of all READY SubConns otherwise. +func (b *rrBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = newPicker(nil, balancer.ErrTransientFailure) + return + } + var readySCs []balancer.SubConn + for sc, st := range b.scStates { + if st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + b.picker = newPicker(readySCs, nil) +} + +func (b *rrBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("roundrobin.rrBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("roundrobin.rrBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. 
+ delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) + return +} + +// Close is a nop because roundrobin balancer doesn't have internal state to clean +// up, and it doesn't need to call RemoveSubConn for the SubConns. +func (b *rrBalancer) Close() { +} + +type picker struct { + // If err is not nil, Pick always returns this err. It's immutable after + // picker is created. + err error + + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Pick() will do a round-robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func newPicker(scs []balancer.SubConn, err error) *picker { + grpclog.Infof("roundrobinPicker: newPicker called with scs: %v, %v", scs, err) + if err != nil { + return &picker{err: err} + } + return &picker{ + subConns: scs, + } +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown, are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate.
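The evaluation step completed just below encodes a strict precedence: any READY SubConn makes the aggregate state READY; failing that, any CONNECTING SubConn makes it CONNECTING; only then is it TRANSIENT_FAILURE. A standalone restatement of that rule, for illustration only (not part of this diff):

```go
package main

import "fmt"

// aggregate mirrors connectivityStateEvaluator's precedence rule:
// Ready beats Connecting, which beats TransientFailure.
func aggregate(numReady, numConnecting uint64) string {
	if numReady > 0 {
		return "READY"
	}
	if numConnecting > 0 {
		return "CONNECTING"
	}
	return "TRANSIENT_FAILURE"
}

func main() {
	fmt.Println(aggregate(0, 0)) // TRANSIENT_FAILURE: nothing usable or in progress
	fmt.Println(aggregate(0, 2)) // CONNECTING: progress is being made
	fmt.Println(aggregate(1, 2)) // READY: one ready SubConn is enough
}
```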
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000000..c673b98fd2b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,266 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateUpdate. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that the scStateUpdate will be sent to. +// +// Upon receiving, the caller should call load to send another +// scStateUpdate onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + resolverUpdateCh chan *resolverUpdate + done chan struct{} + + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + resolverUpdateCh: make(chan *resolverUpdate, 1), + done: make(chan struct{}), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher runs balancer functions sequentially, so the balancer can be implemented +// lock-free.
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + for acbw := range ccb.subConns { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
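`scStateUpdateBuffer` (vendored above) is the classic unbounded-queue idiom: a one-slot channel plus a mutex-guarded backlog, where the consumer calls `load` after every receive. The same shape, reduced to strings, as a sketch (not part of this diff):

```go
package main

import (
	"fmt"
	"sync"
)

// miniBuffer mirrors scStateUpdateBuffer's put/load/get handshake: a 1-slot
// channel plus a slice backlog gives an unbounded, non-blocking queue with
// a single consumer.
type miniBuffer struct {
	c       chan string
	mu      sync.Mutex
	backlog []string
}

func newMiniBuffer() *miniBuffer { return &miniBuffer{c: make(chan string, 1)} }

func (b *miniBuffer) put(s string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- s: // fast path: channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, s)
}

// load moves one backlog item into the channel; the consumer calls it after
// every receive, exactly like watcher does with stateChangeQueue.
func (b *miniBuffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := newMiniBuffer()
	for i := 0; i < 3; i++ {
		b.put(fmt.Sprintf("update-%d", i)) // producers never block
	}
	for i := 0; i < 3; i++ {
		fmt.Println(<-b.c) // receive...
		b.load()           // ...then pull the next backlog item in
	}
}
```

The payoff is that `put` never blocks the gRPC internals delivering state changes, while the single `watcher` goroutine consumes updates one at a time, which is what lets the balancer run lock-free.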
+ acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.acbw = acbw + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 00000000000..6cb39071c1d --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,374 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strings" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + targetAddr := cc.Target() + targetSplitted := strings.Split(targetAddr, ":///") + if len(targetSplitted) >= 2 { + targetAddr = targetSplitted[1] + } + + bwb.b.Start(targetAddr, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: targetAddr, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + // To aggregate the connectivity state. + csEvltr *connectivityStateEvaluator + state connectivity.State + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't returned, cc doesn't have access to balancer.
+ startCh chan struct{} +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + oldSC.UpdateAddresses(newAddrs) + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. + } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } + return +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. + return +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() + return +} + +// The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + var done func(balancer.DoneInfo) + if p != nil { + done = func(i balancer.DoneInfo) { p() } + } + var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + } + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. 
Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 00000000000..4bd673cd502 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,314 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "io" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// recvResponse receives and parses an RPC response. +// On error, it returns the error and indicates whether the call should be retried. +// +// TODO(zhaoq): Check whether the received message sequence is valid. +// TODO ctx is used for stats collection and processing. It is the context passed from the application. +func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) { + // Try to acquire header metadata from the server if there is any. 
+ defer func() { + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() + c.headerMD, err = stream.Header() + if err != nil { + return + } + p := &parser{r: stream} + var inPayload *stats.InPayload + if dopts.copts.StatsHandler != nil { + inPayload = &stats.InPayload{ + Client: true, + } + } + for { + if c.maxReceiveMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)") + } + if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil { + if err == io.EOF { + break + } + return + } + } + if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK { + // TODO in the current implementation, inTrailer may be handled before inPayload in some cases. + // Fix the order if necessary. + dopts.copts.StatsHandler.HandleRPC(ctx, inPayload) + } + c.trailerMD = stream.Trailer() + return nil +} + +// sendRequest writes out various information of an RPC such as Context and Message. +func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) { + defer func() { + if err != nil { + // If err is connection error, t will be closed, no need to close stream here. + if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() + var ( + cbuf *bytes.Buffer + outPayload *stats.OutPayload + ) + if compressor != nil { + cbuf = new(bytes.Buffer) + } + if dopts.copts.StatsHandler != nil { + outPayload = &stats.OutPayload{ + Client: true, + } + } + hdr, data, err := encode(dopts.codec, args, compressor, cbuf, outPayload) + if err != nil { + return err + } + if c.maxSendMessageSize == nil { + return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)") + } + if len(data) > *c.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) + if err == nil && outPayload != nil { + outPayload.SentTime = time.Now() + dopts.copts.StatsHandler.HandleRPC(ctx, outPayload) + } + // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method + // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following + // recvResponse to get the final status. + if err != nil && err != io.EOF { + return err + } + // Sent successfully. + return nil +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) 
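`Invoke`/`invoke` above drive a unary RPC end to end: pick a transport, send the request, receive the response, and report the outcome to the balancer's `done` callback. Calling it directly looks roughly like the sketch below; the method name and message types are hypothetical, and generated code normally does this for you:

```go
package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func doCall(ctx context.Context, cc *grpc.ClientConn) error {
	req, reply := &pb.Request{}, &pb.Reply{}
	// FailFast(false) makes this a wait-for-ready call: instead of failing
	// with Unavailable while no connection is READY, it blocks (subject to
	// ctx) until the balancer can supply one, matching the retry branches
	// in invoke above.
	return cc.Invoke(ctx, "/example.Service/Method", req, reply, grpc.FailFast(false))
}
```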
+} + +func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + if mc.Timeout != nil && *mc.Timeout >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer cancel() + } + + opts = append(cc.dopts.callOptions, opts...) + for _, o := range opts { + if err := o.before(c); err != nil { + return toRPCErr(err) + } + } + defer func() { + for _, o := range opts { + o.after(c) + } + }() + + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + + if EnableTracing { + c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + defer c.traceInfo.tr.Finish() + c.traceInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) + // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. + defer func() { + if e != nil { + c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true) + c.traceInfo.tr.SetError() + } + }() + } + ctx = newContextWithRPCInfo(ctx, c.failFast) + sh := cc.dopts.copts.StatsHandler + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + begin := &stats.Begin{ + Client: true, + BeginTime: time.Now(), + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + defer func() { + end := &stats.End{ + Client: true, + EndTime: time.Now(), + Error: e, + } + sh.HandleRPC(ctx, end) + }() + } + topts := &transport.Options{ + Last: true, + Delay: false, + } + for { + var ( + err error + t transport.ClientTransport + stream *transport.Stream + // Record the done handler from Balancer.Get(...). It is called once the + // RPC has completed or failed. + done func(balancer.DoneInfo) + ) + // TODO(zhaoq): Need a formal spec of fail-fast. + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + t, done, err = cc.getTransport(ctx, c.failFast) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := status.FromError(err); ok { + return err + } + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return Errorf(codes.Internal, "%v", err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) + } + stream, err = t.NewStream(ctx, callHdr) + if err != nil { + if done != nil { + if _, ok := err.(transport.ConnectionError); ok { + // If error is connection error, transport was sending data on wire, + // and we are not sure if anything has been sent on wire. + // If error is not connection error, we are sure nothing has been sent. 
+ updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) + } + done(balancer.DoneInfo{Err: err}) + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + if peer, ok := peer.FromContext(stream.Context()); ok { + c.peer = peer + } + err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) + if err != nil { + if done != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) + done(balancer.DoneInfo{Err: err}) + } + // Retry a non-failfast RPC when + // i) there is a connection error; or + // ii) the server started to drain before this RPC was initiated. + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + err = recvResponse(ctx, cc.dopts, t, c, stream, reply) + if err != nil { + if done != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) + done(balancer.DoneInfo{Err: err}) + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return toRPCErr(err) + } + if c.traceInfo.tr != nil { + c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) + } + t.CloseStream(stream, nil) + if done != nil { + updateRPCInfoInContext(ctx, rpcInfo{ + bytesSent: stream.BytesSent(), + bytesReceived: stream.BytesReceived(), + }) + done(balancer.DoneInfo{Err: err}) + } + return stream.Status().Err() + } +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 00000000000..5f5aac41f11 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1148 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. + "google.golang.org/grpc/stats" + "google.golang.org/grpc/transport" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + ErrClientConnClosing = errors.New("grpc: the client connection is closing") + // ErrClientConnTimeout indicates that the ClientConn cannot establish the + // underlying connections within the specified timeout. 
+ // DEPRECATED: Please use context.DeadlineExceeded instead.
+ ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+ // errNoTransportSecurity indicates that there is no transport security
+ // being set for ClientConn. Users should either set one or explicitly
+ // call the WithInsecure DialOption to disable security.
+ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+ // errTransportCredentialsMissing indicates that users want to transmit security
+ // information (e.g., an oauth2 token), which requires a secure connection, on an
+ // insecure connection.
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+ // errCredentialsConflict indicates that grpc.WithTransportCredentials()
+ // and grpc.WithInsecure() are both called for a connection.
+ errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
+ // errNetworkIO indicates that the connection is down due to some network I/O error.
+ errNetworkIO = errors.New("grpc: failed with network I/O error")
+ // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+ errConnDrain = errors.New("grpc: the connection is drained")
+ // errConnClosing indicates that the connection is closing.
+ errConnClosing = errors.New("grpc: the connection is closing")
+ // errConnUnavailable indicates that the connection is unavailable.
+ errConnUnavailable = errors.New("grpc: the connection is unavailable")
+ // errBalancerClosed indicates that the balancer is closed.
+ errBalancerClosed = errors.New("grpc: balancer is closed")
+ // minimum time to give a connection to complete
+ minConnectTimeout = 20 * time.Second
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+ unaryInt UnaryClientInterceptor
+ streamInt StreamClientInterceptor
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ bs backoffStrategy
+ block bool
+ insecure bool
+ timeout time.Duration
+ scChan <-chan ServiceConfig
+ copts transport.ConnectOptions
+ callOptions []CallOption
+ // This is to support v1 balancer.
+ balancerBuilder balancer.Builder
+}
+
+const (
+ defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+ defaultClientMaxSendMessageSize = math.MaxInt32
+)
+
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithWriteBufferSize lets you set the size of the write buffer; this determines how much data
+// can be batched before doing a write on the wire.
+func WithWriteBufferSize(s int) DialOption {
+ return func(o *dialOptions) {
+ o.copts.WriteBufferSize = s
+ }
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines how much data
+// can be read at most for each read syscall.
+func WithReadBufferSize(s int) DialOption {
+ return func(o *dialOptions) {
+ o.copts.ReadBufferSize = s
+ }
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
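As an editorial aside, here is a minimal sketch of how the buffer- and window-size options above combine at dial time. The target address and the specific sizes are illustrative assumptions, not part of this diff:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Illustrative values; tune per workload. Window sizes below 64K
	// are ignored, per the option documentation above.
	conn, err := grpc.Dial(
		"example.com:443", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithWriteBufferSize(64*1024),
		grpc.WithReadBufferSize(64*1024),
		grpc.WithInitialWindowSize(1<<20),
		grpc.WithInitialConnWindowSize(1<<20),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```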
+func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + } +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. +func WithCodec(c Codec) DialOption { + return func(o *dialOptions) { + o.codec = c + } +} + +// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message +// compressor. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating +// message decompressor. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// Deprecated: use the new balancer APIs in balancer package instead. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerBuilder is for testing only. Users using custom balancers should +// register their balancer and use service config to choose the balancer to use. +func WithBalancerBuilder(b balancer.Builder) DialOption { + // TODO(bar) remove this when switching balancer is done. + return func(o *dialOptions) { + o.balancerBuilder = b + } +} + +// WithServiceConfig returns a DialOption which has a channel to read the service configuration. +// DEPRECATED: service config should be received through name resolver, as specified here. +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return func(o *dialOptions) { + o.scChan = c + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. + setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for retries after a +// failed connection attempt. 
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs backoffStrategy) DialOption {
+ return func(o *dialOptions) {
+ o.bs = bs
+ }
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the underlying
+// connection is up. Without this, Dial returns immediately and connecting to the server
+// happens in the background.
+func WithBlock() DialOption {
+ return func(o *dialOptions) {
+ o.block = true
+ }
+}
+
+// WithInsecure returns a DialOption which disables transport security for this ClientConn.
+// Note that transport security is required unless WithInsecure is set.
+func WithInsecure() DialOption {
+ return func(o *dialOptions) {
+ o.insecure = true
+ }
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+ return func(o *dialOptions) {
+ o.copts.TransportCredentials = creds
+ }
+}
+
+// WithPerRPCCredentials returns a DialOption which sets
+// credentials and places auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+ return func(o *dialOptions) {
+ o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+ }
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
+// initially. This is valid if and only if WithBlock() is present.
+// Deprecated: use DialContext and context.WithTimeout instead.
+func WithTimeout(d time.Duration) DialOption {
+ return func(o *dialOptions) {
+ o.timeout = d
+ }
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
+// Temporary() method to decide if it should try to reconnect to the network address.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
+ if deadline, ok := ctx.Deadline(); ok {
+ return f(addr, deadline.Sub(time.Now()))
+ }
+ return f(addr, 0)
+ }
+ }
+}
+
+// WithStatsHandler returns a DialOption that specifies the stats handler
+// for all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+ return func(o *dialOptions) {
+ o.copts.StatsHandler = h
+ }
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
+// If f is true, and the dialer returns a non-temporary error, gRPC will fail the connection to the network
+// address and won't try to reconnect.
+// The default value of FailOnNonTempDialError is false.
+// This is an EXPERIMENTAL API.
+func FailOnNonTempDialError(f bool) DialOption {
+ return func(o *dialOptions) {
+ o.copts.FailOnNonTempDialError = f
+ }
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+ return func(o *dialOptions) {
+ o.copts.UserAgent = s
+ }
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+ return func(o *dialOptions) {
+ o.copts.KeepaliveParams = kp
+ }
+}
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
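For context on the interceptor option documented above, a hedged sketch of a client-side unary interceptor; `loggingInterceptor` and the target address are illustrative names, not part of this diff:

```go
package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// loggingInterceptor times each unary RPC and logs its outcome before
// delegating to the real invoker.
func loggingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("RPC %s took %v (err=%v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("example.com:443", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(loggingInterceptor),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```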
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return func(o *dialOptions) { + o.unaryInt = f + } +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return func(o *dialOptions) { + o.streamInt = f + } +} + +// WithAuthority returns a DialOption that specifies the value to be used as +// the :authority pseudo-header. This value only works with WithInsecure and +// has no effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return func(o *dialOptions) { + o.copts.Authority = a + } +} + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. ctx can be used to +// cancel or expire the pending connection. Once this function returns, the +// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close +// to terminate all the pending operations after this function returns. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), + } + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt(&cc.dopts) + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + return dialContext(ctx, "tcp", addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + scSet = true + } + default: + } + } + // Set defaults. + if cc.dopts.codec == nil { + cc.dopts.codec = protoCodec{} + } + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig + } + cc.parsedTarget = parseTarget(cc.target) + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { + cc.authority = cc.dopts.copts.Authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. 
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if ok {
+ cc.sc = sc
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+ if cc.dopts.scChan != nil {
+ go cc.scWatcher()
+ }
+
+ var credsClone credentials.TransportCredentials
+ if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+ credsClone = creds.Clone()
+ }
+ cc.balancerBuildOpts = balancer.BuildOptions{
+ DialCreds: credsClone,
+ Dialer: cc.dopts.copts.Dialer,
+ }
+
+ if cc.dopts.balancerBuilder != nil {
+ cc.customBalancer = true
+ // Build should not take a long time. So it's ok to not have a goroutine for it.
+ cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+ }
+
+ // Build the resolver.
+ cc.resolverWrapper, err = newCCResolverWrapper(cc)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build resolver: %v", err)
+ }
+
+ // A blocking dial blocks until the clientConn is ready.
+ if cc.dopts.block {
+ for {
+ s := cc.GetState()
+ if s == connectivity.Ready {
+ break
+ }
+ if !cc.WaitForStateChange(ctx, s) {
+ // ctx got timeout or canceled.
+ return nil, ctx.Err()
+ }
+ }
+ }
+
+ return cc, nil
+}
+
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+type connectivityStateManager struct {
+ mu sync.Mutex
+ state connectivity.State
+ notifyChan chan struct{}
+}
+
+// updateState updates the connectivity.State of ClientConn.
+// If there's a change it notifies goroutines waiting on state change to
+// happen.
+func (csm *connectivityStateManager) updateState(state connectivity.State) {
+ csm.mu.Lock()
+ defer csm.mu.Unlock()
+ if csm.state == connectivity.Shutdown {
+ return
+ }
+ if csm.state == state {
+ return
+ }
+ csm.state = state
+ if csm.notifyChan != nil {
+ // There are other goroutines waiting on this channel.
+ close(csm.notifyChan)
+ csm.notifyChan = nil
+ }
+}
+
+func (csm *connectivityStateManager) getState() connectivity.State {
+ csm.mu.Lock()
+ defer csm.mu.Unlock()
+ return csm.state
+}
+
+func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
+ csm.mu.Lock()
+ defer csm.mu.Unlock()
+ if csm.notifyChan == nil {
+ csm.notifyChan = make(chan struct{})
+ }
+ return csm.notifyChan
+}
+
+// ClientConn represents a client connection to an RPC server.
+type ClientConn struct {
+ ctx context.Context
+ cancel context.CancelFunc
+
+ target string
+ parsedTarget resolver.Target
+ authority string
+ dopts dialOptions
+ csMgr *connectivityStateManager
+
+ customBalancer bool // If this is true, switching balancer will be disabled.
+ balancerBuildOpts balancer.BuildOptions
+ resolverWrapper *ccResolverWrapper
+ blockingpicker *pickerWrapper
+
+ mu sync.RWMutex
+ sc ServiceConfig
+ scRaw string
+ conns map[*addrConn]struct{}
+ // Keepalive parameter can be updated if a GoAway is received.
+ mkp keepalive.ClientParameters
+ curBalancerName string
+ curAddresses []resolver.Address
+ balancerWrapper *ccBalancerWrapper
+}
+
+// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
+// ctx expires. It returns true in the former case and false in the latter.
+// This is an EXPERIMENTAL API.
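A short sketch of how the experimental `GetState`/`WaitForStateChange` pair can be used to block until the connection is usable, mirroring the `WithBlock` loop in `DialContext` above. `waitForReady` is an illustrative helper, not part of this diff:

```go
package example

import (
	"errors"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitForReady blocks until cc reaches READY, ctx expires, or the
// connection is shut down.
func waitForReady(ctx context.Context, cc *grpc.ClientConn) error {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if s == connectivity.Shutdown {
			// Assumption: treat a closed connection as a terminal error.
			return errors.New("grpc: connection is shut down")
		}
		if !cc.WaitForStateChange(ctx, s) {
			return ctx.Err() // ctx expired or was canceled
		}
	}
}
```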
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+ ch := cc.csMgr.getNotifyChan()
+ if cc.csMgr.getState() != sourceState {
+ return true
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-ch:
+ return true
+ }
+}
+
+// GetState returns the connectivity.State of ClientConn.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) GetState() connectivity.State {
+ return cc.csMgr.getState()
+}
+
+func (cc *ClientConn) scWatcher() {
+ for {
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if !ok {
+ return
+ }
+ cc.mu.Lock()
+ // TODO: load balance policy runtime change is ignored.
+ // We may revisit this decision in the future.
+ cc.sc = sc
+ cc.scRaw = ""
+ cc.mu.Unlock()
+ case <-cc.ctx.Done():
+ return
+ }
+ }
+}
+
+func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.conns == nil {
+ return
+ }
+
+ // TODO(bar switching) when grpclb is submitted, check address type and start grpclb.
+ if !cc.customBalancer && cc.balancerWrapper == nil {
+ // No customBalancer was specified by DialOption, and this is the first
+ // time handling resolved addresses; create a pickfirst balancer.
+ builder := newPickfirstBuilder()
+ cc.curBalancerName = builder.Name()
+ cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+ }
+
+ // TODO(bar switching) compare addresses, if there's no update, don't notify balancer.
+ cc.curAddresses = addrs
+ cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+}
+
+// switchBalancer starts the switch from the current balancer to the balancer with the given name.
+func (cc *ClientConn) switchBalancer(name string) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.conns == nil {
+ return
+ }
+ grpclog.Infof("ClientConn switching balancer to %q", name)
+
+ if cc.customBalancer {
+ grpclog.Infoln("ignoring service config balancer configuration: WithBalancer DialOption used instead")
+ return
+ }
+
+ if cc.curBalancerName == name {
+ return
+ }
+
+ // TODO(bar switching) change this to two steps: drain and close.
+ // Keep track of sc in wrapper.
+ cc.balancerWrapper.close()
+
+ builder := balancer.Get(name)
+ if builder == nil {
+ grpclog.Infof("failed to get balancer builder for: %v (this should never happen...)", name)
+ builder = newPickfirstBuilder()
+ }
+ cc.curBalancerName = builder.Name()
+ cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+ cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
+}
+
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+ // TODO(bar switching) send updates to all balancer wrappers when graceful
+ // balancer switching is supported.
+ cc.balancerWrapper.handleSubConnStateChange(sc, s)
+ cc.mu.Unlock()
+}
+
+// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
+ ac := &addrConn{
+ cc: cc,
+ addrs: addrs,
+ dopts: cc.dopts,
+ }
+ ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ // Track ac in cc. This needs to be done before any getTransport(...) is called.
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return nil, ErrClientConnClosing
+ }
+ cc.conns[ac] = struct{}{}
+ cc.mu.Unlock()
+ return ac, nil
+}
+
+// removeAddrConn removes the addrConn in the subConn from clientConn.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+ delete(cc.conns, ac)
+ cc.mu.Unlock()
+ ac.tearDown(err)
+}
+
+// connect starts creating the transport and also starts the transport monitor
+// goroutine for this ac.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+// This was part of resetAddrConn, keep it here to make the diff look clean.
+func (ac *addrConn) connect() error {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ if ac.state != connectivity.Idle {
+ ac.mu.Unlock()
+ return nil
+ }
+ ac.state = connectivity.Connecting
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.mu.Unlock()
+
+ // Start a goroutine connecting to the server asynchronously.
+ go func() {
+ if err := ac.resetTransport(); err != nil {
+ grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
+ }
+ return
+ }
+ ac.transportMonitor()
+ }()
+ return nil
+}
+
+// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
+//
+// It checks whether the currently connected address of ac is in the new addrs list.
+// - If true, it updates ac.addrs and returns true. The ac will keep using
+// the existing connection.
+// - If false, it does nothing and returns false.
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+ if ac.state == connectivity.Shutdown {
+ ac.addrs = addrs
+ return true
+ }
+
+ var curAddrFound bool
+ for _, a := range addrs {
+ if reflect.DeepEqual(ac.curAddr, a) {
+ curAddrFound = true
+ break
+ }
+ }
+ grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+ if curAddrFound {
+ ac.addrs = addrs
+ }
+
+ return curAddrFound
+}
+
+// GetMethodConfig gets the method config of the input method.
+// If there's an exact match for the input method (i.e. /service/method), we return
+// the corresponding MethodConfig.
+// If there isn't an exact match for the input method, we look for the default config
+// under the service (i.e. /service/). If there is a default MethodConfig for
+// the service, we return it.
+// Otherwise, we return an empty MethodConfig.
+func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
+ // TODO: Avoid the locking here.
+ cc.mu.RLock()
+ defer cc.mu.RUnlock()
+ m, ok := cc.sc.Methods[method]
+ if !ok {
+ i := strings.LastIndex(method, "/")
+ m, _ = cc.sc.Methods[method[:i+1]]
+ }
+ return m
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
+ if err != nil {
+ return nil, nil, toRPCErr(err)
+ }
+ return t, done, nil
+}
+
+// handleServiceConfig parses the service config string in JSON format to the Go native
+// struct ServiceConfig, and stores both the struct and the JSON string in ClientConn.
+func (cc *ClientConn) handleServiceConfig(js string) error {
+ sc, err := parseServiceConfig(js)
+ if err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ cc.scRaw = js
+ cc.sc = sc
+ cc.mu.Unlock()
+ return nil
+}
+
+// Close tears down the ClientConn and all underlying connections.
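A hedged sketch of the typical lifecycle around `Close` and the method-config lookup described above; the target and method name are illustrative, not part of this diff:

```go
package example

import (
	"log"

	"google.golang.org/grpc"
)

func run() error {
	// "example.com:443" is an illustrative target.
	conn, err := grpc.Dial("example.com:443", grpc.WithInsecure())
	if err != nil {
		return err
	}
	// Close tears down every addrConn plus the resolver and balancer
	// wrappers; a second Close returns ErrClientConnClosing.
	defer conn.Close()

	// Lookup falls back from "/service/method" to the service default
	// "/service/" as documented above.
	if mc := conn.GetMethodConfig("/example.Service/Method"); mc.Timeout != nil {
		log.Printf("per-method timeout from service config: %v", *mc.Timeout)
	}
	return nil
}
```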
+func (cc *ClientConn) Close() error { + cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + cc.blockingpicker.close() + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + curAddr resolver.Address + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn + + mu sync.Mutex + state connectivity.State + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport + + // The reason this addrConn is torn down. + tearDownErr error +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.TooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) + } +} + +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) + } +} + +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.transport = nil + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + for retries := 0; ; retries++ { + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout + ac.mu.Lock() + if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) { + timeout = time.Duration(int(sleepTime) / len(ac.addrs)) + } + connectTime := time.Now() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + } + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts + ac.mu.Unlock() + for _, addr := range addrsIter { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. 
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ ac.mu.Unlock()
+ sinfo := transport.TargetInfo{
+ Addr: addr.Addr,
+ Metadata: addr.Metadata,
+ Authority: ac.cc.authority,
+ }
+ newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout)
+ if err != nil {
+ if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+ return err
+ }
+ grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr)
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ ac.mu.Unlock()
+ continue
+ }
+ ac.mu.Lock()
+ ac.printf("ready")
+ if ac.state == connectivity.Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ newTransport.Close()
+ return errConnClosing
+ }
+ ac.state = connectivity.Ready
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ t := ac.transport
+ ac.transport = newTransport
+ if t != nil {
+ t.Close()
+ }
+ ac.curAddr = addr
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.mu.Unlock()
+ return nil
+ }
+ ac.mu.Lock()
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.mu.Unlock()
+ timer := time.NewTimer(sleepTime - time.Since(connectTime))
+ select {
+ case <-timer.C:
+ case <-ac.ctx.Done():
+ timer.Stop()
+ return ac.ctx.Err()
+ }
+ timer.Stop()
+ }
+}
+
+// Run in a goroutine to track the error in transport and create the
+// new transport if an error happens. It returns when the channel is closing.
+func (ac *addrConn) transportMonitor() {
+ for {
+ ac.mu.Lock()
+ t := ac.transport
+ ac.mu.Unlock()
+ // Block until we receive a goaway or an error occurs.
+ select {
+ case <-t.GoAway():
+ case <-t.Error():
+ }
+ // If a GoAway happened, regardless of error, adjust our keepalive
+ // parameters as appropriate.
+ select {
+ case <-t.GoAway():
+ ac.adjustParams(t.GetGoAwayReason())
+ default:
+ }
+ ac.mu.Lock()
+ // Set connectivity state to TransientFailure before calling
+ // resetTransport. Transition READY->CONNECTING is not valid.
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.curAddr = resolver.Address{}
+ ac.mu.Unlock()
+ if err := ac.resetTransport(); err != nil {
+ ac.mu.Lock()
+ ac.printf("transport exiting: %v", err)
+ ac.mu.Unlock()
+ grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
+ }
+ return
+ }
+ }
+}
+
+// wait blocks until i) the new transport is up, ii) ctx is done, iii) ac is closed, or
+// iv) the transport is in connectivity.TransientFailure and there is a balancer or failfast is true.
+func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
+ for {
+ ac.mu.Lock()
+ switch {
+ case ac.state == connectivity.Shutdown:
+ if failfast || !hasBalancer {
+ // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
+ err := ac.tearDownErr
+ ac.mu.Unlock()
+ return nil, err
+ }
+ ac.mu.Unlock()
+ return nil, errConnClosing
+ case ac.state == connectivity.Ready:
+ ct := ac.transport
+ ac.mu.Unlock()
+ return ct, nil
+ case ac.state == connectivity.TransientFailure:
+ if failfast || hasBalancer {
+ ac.mu.Unlock()
+ return nil, errConnUnavailable
+ }
+ }
+ ready := ac.ready
+ if ready == nil {
+ ready = make(chan struct{})
+ ac.ready = ready
+ }
+ ac.mu.Unlock()
+ select {
+ case <-ctx.Done():
+ return nil, toRPCErr(ctx.Err())
+ // Wait until the new transport is ready or failed.
+ case <-ready:
+ }
+ }
+}
+
+// getReadyTransport returns the transport if ac's state is READY.
+// Otherwise it returns nil, false.
+// If ac's state is IDLE, it will trigger ac to connect.
+func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+ ac.mu.Lock()
+ if ac.state == connectivity.Ready {
+ t := ac.transport
+ ac.mu.Unlock()
+ return t, true
+ }
+ var idle bool
+ if ac.state == connectivity.Idle {
+ idle = true
+ }
+ ac.mu.Unlock()
+ // Trigger idle ac to connect.
+ if idle {
+ ac.connect()
+ }
+ return nil, false
+}
+
+// tearDown starts to tear down the addrConn.
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many addrConn's in a
+// tight loop).
+// tearDown doesn't remove ac from ac.cc.conns.
+func (ac *addrConn) tearDown(err error) {
+ ac.cancel()
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ ac.curAddr = resolver.Address{}
+ if err == errConnDrain && ac.transport != nil {
+ // GracefulClose(...) may be executed multiple times when
+ // i) receiving multiple GoAway frames from the server; or
+ // ii) there are concurrent name resolver/Balancer triggered
+ // address removal and GoAway.
+ ac.transport.GracefulClose()
+ }
+ if ac.state == connectivity.Shutdown {
+ return
+ }
+ ac.state = connectivity.Shutdown
+ ac.tearDownErr = err
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ if ac.events != nil {
+ ac.events.Finish()
+ ac.events = nil
+ }
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ return
+}
+
+func (ac *addrConn) getState() connectivity.State {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ return ac.state
+}
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 00000000000..b452a4ae8da
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,102 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "math"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+// Note that implementations of this interface must be thread safe;
+// a Codec's methods can be called from concurrent goroutines.
+type Codec interface {
+ // Marshal returns the wire format of v.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal parses the wire format into v.
+ Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. The returned + // string will be used as part of content type in transmission. + String() string +} + +// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. +type protoCodec struct { +} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (p protoCodec) Marshal(v interface{}) ([]byte, error) { + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := p.marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (p protoCodec) Unmarshal(data []byte, v interface{}) error { + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + v.(proto.Message).Reset() + err := cb.Unmarshal(v.(proto.Message)) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (protoCodec) String() string { + return "proto" +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100755 index 00000000000..4cdc6ba7c09 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 00000000000..259837060ab --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Code"; DO NOT EDIT. 
+ +package codes + +import "fmt" + +const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" + +var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} + +func (i Code) String() string { + if i >= Code(len(_Code_index)-1) { + return fmt.Sprintf("Code(%d)", i) + } + return _Code_name[_Code_index[i]:_Code_index[i+1]] +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 00000000000..21e7733a5fe --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +//go:generate stringer -type=Code + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. 
+ Unauthenticated Code = 16
+
+ // ResourceExhausted indicates some resource has been exhausted, perhaps
+ // a per-user quota, or perhaps the entire file system is out of space.
+ ResourceExhausted Code = 8
+
+ // FailedPrecondition indicates operation was rejected because the
+ // system is not in a state required for the operation's execution.
+ // For example, directory to be deleted may be non-empty, an rmdir
+ // operation is applied to a non-directory, etc.
+ //
+ // A litmus test that may help a service implementor in deciding
+ // between FailedPrecondition, Aborted, and Unavailable:
+ // (a) Use Unavailable if the client can retry just the failing call.
+ // (b) Use Aborted if the client should retry at a higher-level
+ // (e.g., restarting a read-modify-write sequence).
+ // (c) Use FailedPrecondition if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, FailedPrecondition
+ // should be returned since the client should not retry unless
+ // they have first fixed up the directory by deleting files from it.
+ // (d) Use FailedPrecondition if the client performs conditional
+ // REST Get/Update/Delete on a resource and the resource on the
+ // server does not match the condition. E.g., conflicting
+ // read-modify-write on the same resource.
+ FailedPrecondition Code = 9
+
+ // Aborted indicates the operation was aborted, typically due to a
+ // concurrency issue like sequencer check failures, transaction aborts,
+ // etc.
+ //
+ // See litmus test above for deciding between FailedPrecondition,
+ // Aborted, and Unavailable.
+ Aborted Code = 10
+
+ // OutOfRange means operation was attempted past the valid range.
+ // E.g., seeking or reading past end of file.
+ //
+ // Unlike InvalidArgument, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate InvalidArgument if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // OutOfRange if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between FailedPrecondition and
+ // OutOfRange. We recommend using OutOfRange (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for an OutOfRange error to detect when
+ // they are done.
+ OutOfRange Code = 11
+
+ // Unimplemented indicates operation is not implemented or not
+ // supported/enabled in this service.
+ Unimplemented Code = 12
+
+ // Internal errors. This means some invariants expected by the underlying
+ // system have been broken. If you see one of these errors,
+ // something is very broken.
+ Internal Code = 13
+
+ // Unavailable indicates the service is currently unavailable.
+ // This is most likely a transient condition and may be corrected
+ // by retrying with a backoff.
+ //
+ // See litmus test above for deciding between FailedPrecondition,
+ // Aborted, and Unavailable.
+ Unavailable Code = 14
+
+ // DataLoss indicates unrecoverable data loss or corruption.
+ DataLoss Code = 15
+)
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
new file mode 100644
index 00000000000..568ef5dc68b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package connectivity defines connectivity semantics.
+// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
+// All APIs in this package are experimental.
+package connectivity
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+)
+
+// State indicates the state of connectivity.
+// It can be the state of a ClientConn or SubConn.
+type State int
+
+func (s State) String() string {
+ switch s {
+ case Idle:
+ return "IDLE"
+ case Connecting:
+ return "CONNECTING"
+ case Ready:
+ return "READY"
+ case TransientFailure:
+ return "TRANSIENT_FAILURE"
+ case Shutdown:
+ return "SHUTDOWN"
+ default:
+ grpclog.Errorf("unknown connectivity state: %d", s)
+ return "Invalid-State"
+ }
+}
+
+const (
+ // Idle indicates the ClientConn is idle.
+ Idle State = iota
+ // Connecting indicates the ClientConn is connecting.
+ Connecting
+ // Ready indicates the ClientConn is ready for work.
+ Ready
+ // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+ TransientFailure
+ // Shutdown indicates the ClientConn has started shutting down.
+ Shutdown
+)
+
+// Reporter reports the connectivity states.
+type Reporter interface {
+ // CurrentState returns the current state of the reporter.
+ CurrentState() State
+ // WaitForStateChange blocks until the reporter's state is different from the given state,
+ // and returns true.
+ // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
+ WaitForStateChange(context.Context, State) bool
+}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 00000000000..90b6a61170d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials implements various credentials supported by the gRPC library,
+// which encapsulate all the state needed by a client to authenticate with a
+// server and make various assertions, e.g., about the client's identity, role,
+// or whether it is authorized to make a particular call.
+package credentials // import "google.golang.org/grpc/credentials" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "strings" + + "golang.org/x/net/context" +) + +// alpnProtoStr are the specified application level protocols for gRPC. +var alpnProtoStr = []string{"h2"} + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. uri is the URI of the entry point for the request. When + // supported by the underlying implementation, ctx can be used for + // timeout and cancellation. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. 
+ // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + colonPos := strings.LastIndex(authority, ":") + if colonPos == -1 { + colonPos = len(authority) + } + cfg.ServerName = authority[:colonPos] + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + return nil, nil, err + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = alpnProtoStr + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. 
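A brief sketch of wiring the client-side TLS helpers above into a dial; the CA bundle path and target are illustrative assumptions, not part of this diff:

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialTLS dials with server-side TLS verified against a CA bundle.
// "ca.pem" and "example.com:443" are illustrative values.
func dialTLS() (*grpc.ClientConn, error) {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		return nil, err
	}
	return grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
}
```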
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go new file mode 100644 index 00000000000..60409aac0fb --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -0,0 +1,60 @@ +// +build go1.7 +// +build !go1.8 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go new file mode 100644 index 00000000000..93f0e1d8de2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -0,0 +1,38 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go new file mode 100644 index 00000000000..d6bbcc9fdd9 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -0,0 +1,57 @@ +// +build !go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 00000000000..187adbb117f --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. 
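+
+A minimal, illustrative client setup combining this package with the
+credentials package vendored above (the target address and CA file name are
+placeholders, not part of this package):
+
+	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
+	if err != nil {
+		// handle the error
+	}
+	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
+	if err != nil {
+		// handle the error
+	}
+	defer conn.Close()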
+*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 00000000000..f3dbf21700f --- /dev/null +++ b/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,98 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "io" + "net" + "net/http" + "os" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 00000000000..de23098eb9a --- /dev/null +++ b/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,99 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + "os" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + case ErrClientConnClosing: + return status.Error(codes.FailedPrecondition, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// convertCode converts a standard Go error into its canonical code. Note that +// this is only used to translate the error returned by the server applications. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled, netctx.Canceled: + return codes.Canceled + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go new file mode 100644 index 00000000000..db56ff36217 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -0,0 +1,704 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "errors" + "fmt" + "math/rand" + "net" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/naming" +) + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { + desc := &StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) { + m := new(lbmpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// NewGRPCLBBalancer creates a grpclb load balancer. +func NewGRPCLBBalancer(r naming.Resolver) Balancer { + return &grpclbBalancer{ + r: r, + } +} + +type remoteBalancerInfo struct { + addr string + // the server name used for authentication with the remote LB server. + name string +} + +// grpclbAddrInfo consists of the information of a backend server. +type grpclbAddrInfo struct { + addr Address + connected bool + // dropForRateLimiting indicates whether this particular request should be + // dropped by the client for rate limiting. + dropForRateLimiting bool + // dropForLoadBalancing indicates whether this particular request should be + // dropped by the client for load balancing. + dropForLoadBalancing bool +} + +type grpclbBalancer struct { + r naming.Resolver + target string + mu sync.Mutex + seq int // a sequence number to make sure addrCh does not get stale addresses. + w naming.Watcher + addrCh chan []Address + rbs []remoteBalancerInfo + addrs []*grpclbAddrInfo + next int + waitCh chan struct{} + done bool + rand *rand.Rand + + clientStats lbmpb.ClientStats +} + +func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error { + updates, err := w.Next() + if err != nil { + grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err) + return err + } + b.mu.Lock() + defer b.mu.Unlock() + if b.done { + return ErrClientConnClosing + } + for _, update := range updates { + switch update.Op { + case naming.Add: + var exist bool + for _, v := range b.rbs { + // TODO: Is the same addr with different server name a different balancer? + if update.Addr == v.addr { + exist = true + break + } + } + if exist { + continue + } + md, ok := update.Metadata.(*naming.AddrMetadataGRPCLB) + if !ok { + // TODO: Revisit the handling here and may introduce some fallback mechanism. + grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata) + continue + } + switch md.AddrType { + case naming.Backend: + // TODO: Revisit the handling here and may introduce some fallback mechanism. 
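+				// Plain backend addresses cannot be consumed here: this
+				// balancer only dials remote balancers and obtains the
+				// actual backend list over the BalanceLoad stream.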
+				grpclog.Errorf("The name resolution does not give grpclb addresses")
+				continue
+			case naming.GRPCLB:
+				b.rbs = append(b.rbs, remoteBalancerInfo{
+					addr: update.Addr,
+					name: md.ServerName,
+				})
+			default:
+				grpclog.Errorf("Received unknown address type %d", md.AddrType)
+				continue
+			}
+		case naming.Delete:
+			for i, v := range b.rbs {
+				if update.Addr == v.addr {
+					copy(b.rbs[i:], b.rbs[i+1:])
+					b.rbs = b.rbs[:len(b.rbs)-1]
+					break
+				}
+			}
+		default:
+			grpclog.Errorf("Unknown update.Op %v", update.Op)
+		}
+	}
+	// TODO: Fall back to the basic round-robin load balancing if the resulting address is
+	// not a load balancer.
+	select {
+	case <-ch:
+	default:
+	}
+	ch <- b.rbs
+	return nil
+}
+
+func convertDuration(d *lbmpb.Duration) time.Duration {
+	if d == nil {
+		return 0
+	}
+	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
+
+func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
+	if l == nil {
+		return
+	}
+	servers := l.GetServers()
+	var (
+		sl    []*grpclbAddrInfo
+		addrs []Address
+	)
+	for _, s := range servers {
+		md := metadata.Pairs("lb-token", s.LoadBalanceToken)
+		ip := net.IP(s.IpAddress)
+		ipStr := ip.String()
+		if ip.To4() == nil {
+			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
+			// net.SplitHostPort() will return a "too many colons" error.
+			ipStr = fmt.Sprintf("[%s]", ipStr)
+		}
+		addr := Address{
+			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
+			Metadata: &md,
+		}
+		sl = append(sl, &grpclbAddrInfo{
+			addr:                 addr,
+			dropForRateLimiting:  s.DropForRateLimiting,
+			dropForLoadBalancing: s.DropForLoadBalancing,
+		})
+		addrs = append(addrs, addr)
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.done || seq < b.seq {
+		return
+	}
+	if len(sl) > 0 {
+		// reset b.next to 0 when replacing the server list.
+		b.next = 0
+		b.addrs = sl
+		b.addrCh <- addrs
+	}
+	return
+}
+
+func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+		case <-done:
+			return
+		}
+		b.mu.Lock()
+		stats := b.clientStats
+		b.clientStats = lbmpb.ClientStats{} // Clear the stats.
+		b.mu.Unlock()
+		t := time.Now()
+		stats.Timestamp = &lbmpb.Timestamp{
+			Seconds: t.Unix(),
+			Nanos:   int32(t.Nanosecond()),
+		}
+		if err := s.Send(&lbmpb.LoadBalanceRequest{
+			LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_ClientStats{
+				ClientStats: &stats,
+			},
+		}); err != nil {
+			grpclog.Errorf("grpclb: failed to send load report: %v", err)
+			return
+		}
+	}
+}
+
+func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	stream, err := lbc.BalanceLoad(ctx)
+	if err != nil {
+		grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+		return
+	}
+	b.mu.Lock()
+	if b.done {
+		b.mu.Unlock()
+		return
+	}
+	b.mu.Unlock()
+	initReq := &lbmpb.LoadBalanceRequest{
+		LoadBalanceRequestType: &lbmpb.LoadBalanceRequest_InitialRequest{
+			InitialRequest: &lbmpb.InitialLoadBalanceRequest{
+				Name: b.target,
+			},
+		},
+	}
+	if err := stream.Send(initReq); err != nil {
+		grpclog.Errorf("grpclb: failed to send init request: %v", err)
+		// TODO: backoff on retry?
+		return true
+	}
+	reply, err := stream.Recv()
+	if err != nil {
+		grpclog.Errorf("grpclb: failed to recv init response: %v", err)
+		// TODO: backoff on retry?
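+		// The retry value is currently ignored by the caller in Start, which
+		// closes ccError whenever this method returns; that in turn makes the
+		// monitoring goroutine rotate to the next remote balancer address.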
+ return true + } + initResp := reply.GetInitialResponse() + if initResp == nil { + grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.") + return + } + // TODO: Support delegation. + if initResp.LoadBalancerDelegate != "" { + // delegation + grpclog.Errorf("TODO: Delegation is not supported yet.") + return + } + streamDone := make(chan struct{}) + defer close(streamDone) + b.mu.Lock() + b.clientStats = lbmpb.ClientStats{} // Clear client stats. + b.mu.Unlock() + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + go b.sendLoadReport(stream, d, streamDone) + } + // Retrieve the server list. + for { + reply, err := stream.Recv() + if err != nil { + grpclog.Errorf("grpclb: failed to recv server list: %v", err) + break + } + b.mu.Lock() + if b.done || seq < b.seq { + b.mu.Unlock() + return + } + b.seq++ // tick when receiving a new list of servers. + seq = b.seq + b.mu.Unlock() + if serverList := reply.GetServerList(); serverList != nil { + b.processServerList(serverList, seq) + } + } + return true +} + +func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { + b.rand = rand.New(rand.NewSource(time.Now().Unix())) + // TODO: Fall back to the basic direct connection if there is no name resolver. + if b.r == nil { + return errors.New("there is no name resolver installed") + } + b.target = target + b.mu.Lock() + if b.done { + b.mu.Unlock() + return ErrClientConnClosing + } + b.addrCh = make(chan []Address) + w, err := b.r.Resolve(target) + if err != nil { + b.mu.Unlock() + grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err) + return err + } + b.w = w + b.mu.Unlock() + balancerAddrsCh := make(chan []remoteBalancerInfo, 1) + // Spawn a goroutine to monitor the name resolution of remote load balancer. + go func() { + for { + if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil { + grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err) + close(balancerAddrsCh) + return + } + } + }() + // Spawn a goroutine to talk to the remote load balancer. + go func() { + var ( + cc *ClientConn + // ccError is closed when there is an error in the current cc. + // A new rb should be picked from rbs and connected. + ccError chan struct{} + rb *remoteBalancerInfo + rbs []remoteBalancerInfo + rbIdx int + ) + + defer func() { + if ccError != nil { + select { + case <-ccError: + default: + close(ccError) + } + } + if cc != nil { + cc.Close() + } + }() + + for { + var ok bool + select { + case rbs, ok = <-balancerAddrsCh: + if !ok { + return + } + foundIdx := -1 + if rb != nil { + for i, trb := range rbs { + if trb == *rb { + foundIdx = i + break + } + } + } + if foundIdx >= 0 { + if foundIdx >= 1 { + // Move the address in use to the beginning of the list. + b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0] + rbIdx = 0 + } + continue // If found, don't dial new cc. + } else if len(rbs) > 0 { + // Pick a random one from the list, instead of always using the first one. + if l := len(rbs); l > 1 && rb != nil { + tmpIdx := b.rand.Intn(l - 1) + b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0] + } + rbIdx = 0 + rb = &rbs[0] + } else { + // foundIdx < 0 && len(rbs) <= 0. + rb = nil + } + case <-ccError: + ccError = nil + if rbIdx < len(rbs)-1 { + rbIdx++ + rb = &rbs[rbIdx] + } else { + rb = nil + } + } + + if rb == nil { + continue + } + + if cc != nil { + cc.Close() + } + // Talk to the remote load balancer to get the server list. 
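+			// The dial below is assembled from: transport credentials (with
+			// the server name overridden to rb.name when set) or insecure
+			// mode, an optional custom dialer, and WithBlock so that the
+			// one-second DialContext timeout bounds the whole connection
+			// attempt.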
+ var ( + err error + dopts []DialOption + ) + if creds := config.DialCreds; creds != nil { + if rb.name != "" { + if err := creds.OverrideServerName(rb.name); err != nil { + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err) + continue + } + } + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + dopts = append(dopts, WithInsecure()) + } + if dialer := config.Dialer; dialer != nil { + // WithDialer takes a different type of function, so we instead use a special DialOption here. + dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) + } + dopts = append(dopts, WithBlock()) + ccError = make(chan struct{}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + cc, err = DialContext(ctx, rb.addr, dopts...) + cancel() + if err != nil { + grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err) + close(ccError) + continue + } + b.mu.Lock() + b.seq++ // tick when getting a new balancer address + seq := b.seq + b.next = 0 + b.mu.Unlock() + go func(cc *ClientConn, ccError chan struct{}) { + lbc := &loadBalancerClient{cc} + b.callRemoteBalancer(lbc, seq) + cc.Close() + select { + case <-ccError: + default: + close(ccError) + } + }(cc, ccError) + } + }() + return nil +} + +func (b *grpclbBalancer) down(addr Address, err error) { + b.mu.Lock() + defer b.mu.Unlock() + for _, a := range b.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +func (b *grpclbBalancer) Up(addr Address) func(error) { + b.mu.Lock() + defer b.mu.Unlock() + if b.done { + return nil + } + var cnt int + for _, a := range b.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing { + cnt++ + } + } + // addr is the only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && b.waitCh != nil { + close(b.waitCh) + b.waitCh = nil + } + return func(err error) { + b.down(addr, err) + } +} + +func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + b.mu.Lock() + if b.done { + b.mu.Unlock() + err = ErrClientConnClosing + return + } + seq := b.seq + + defer func() { + if err != nil { + return + } + put = func() { + s, ok := rpcInfoFromContext(ctx) + if !ok { + return + } + b.mu.Lock() + defer b.mu.Unlock() + if b.done || seq < b.seq { + return + } + b.clientStats.NumCallsFinished++ + if !s.bytesSent { + b.clientStats.NumCallsFinishedWithClientFailedToSend++ + } else if s.bytesReceived { + b.clientStats.NumCallsFinishedKnownReceived++ + } + } + }() + + b.clientStats.NumCallsStarted++ + if len(b.addrs) > 0 { + if b.next >= len(b.addrs) { + b.next = 0 + } + next := b.next + for { + a := b.addrs[next] + next = (next + 1) % len(b.addrs) + if a.connected { + if !a.dropForRateLimiting && !a.dropForLoadBalancing { + addr = a.addr + b.next = next + b.mu.Unlock() + return + } + if !opts.BlockingWait { + b.next = next + if a.dropForLoadBalancing { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForLoadBalancing++ + } else if a.dropForRateLimiting { + b.clientStats.NumCallsFinished++ + b.clientStats.NumCallsFinishedWithDropForRateLimiting++ + } + b.mu.Unlock() + err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr) + return + } + } + if next == b.next { + // Has iterated all the possible address but none is connected. 
+				break
+			}
+		}
+	}
+	if !opts.BlockingWait {
+		b.clientStats.NumCallsFinished++
+		b.clientStats.NumCallsFinishedWithClientFailedToSend++
+		b.mu.Unlock()
+		err = Errorf(codes.Unavailable, "there is no address available")
+		return
+	}
+	// Wait on b.waitCh for non-failfast RPCs.
+	if b.waitCh == nil {
+		ch = make(chan struct{})
+		b.waitCh = ch
+	} else {
+		ch = b.waitCh
+	}
+	b.mu.Unlock()
+	for {
+		select {
+		case <-ctx.Done():
+			b.mu.Lock()
+			b.clientStats.NumCallsFinished++
+			b.clientStats.NumCallsFinishedWithClientFailedToSend++
+			b.mu.Unlock()
+			err = ctx.Err()
+			return
+		case <-ch:
+			b.mu.Lock()
+			if b.done {
+				b.clientStats.NumCallsFinished++
+				b.clientStats.NumCallsFinishedWithClientFailedToSend++
+				b.mu.Unlock()
+				err = ErrClientConnClosing
+				return
+			}
+
+			if len(b.addrs) > 0 {
+				if b.next >= len(b.addrs) {
+					b.next = 0
+				}
+				next := b.next
+				for {
+					a := b.addrs[next]
+					next = (next + 1) % len(b.addrs)
+					if a.connected {
+						if !a.dropForRateLimiting && !a.dropForLoadBalancing {
+							addr = a.addr
+							b.next = next
+							b.mu.Unlock()
+							return
+						}
+						if !opts.BlockingWait {
+							b.next = next
+							if a.dropForLoadBalancing {
+								b.clientStats.NumCallsFinished++
+								b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
+							} else if a.dropForRateLimiting {
+								b.clientStats.NumCallsFinished++
+								b.clientStats.NumCallsFinishedWithDropForRateLimiting++
+							}
+							b.mu.Unlock()
+							err = Errorf(codes.Unavailable, "drop requests for the address %s", a.addr.Addr)
+							return
+						}
+					}
+					if next == b.next {
+						// Has iterated all the possible address but none is connected.
+						break
+					}
+				}
+			}
+			// The newly added addr got removed by Down() again.
+			if b.waitCh == nil {
+				ch = make(chan struct{})
+				b.waitCh = ch
+			} else {
+				ch = b.waitCh
+			}
+			b.mu.Unlock()
+		}
+	}
+}
+
+func (b *grpclbBalancer) Notify() <-chan []Address {
+	return b.addrCh
+}
+
+func (b *grpclbBalancer) Close() error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.done {
+		return errBalancerClosed
+	}
+	b.done = true
+	if b.waitCh != nil {
+		close(b.waitCh)
+	}
+	if b.addrCh != nil {
+		close(b.addrCh)
+	}
+	if b.w != nil {
+		b.w.Close()
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
new file mode 100644
index 00000000000..f4a27125a4f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
@@ -0,0 +1,615 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc_lb_v1/messages/messages.proto
+
+/*
+Package messages is a generated protocol buffer package.
+
+It is generated from these files:
+	grpc_lb_v1/messages/messages.proto
+
+It has these top-level messages:
+	Duration
+	Timestamp
+	LoadBalanceRequest
+	InitialLoadBalanceRequest
+	ClientStats
+	LoadBalanceResponse
+	InitialLoadBalanceResponse
+	ServerList
+	Server
+*/
+package messages
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Duration struct {
+	// Signed seconds of the span of time.
Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"` +} +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := 
m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialRequest); err != nil { + return err + } + case *LoadBalanceRequest_ClientStats: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStats); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceRequest) + switch tag { + case 1: // load_balance_request_type.initial_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceRequest) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} + return true, err + case 2: // load_balance_request_type.client_stats + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientStats) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + s := proto.Size(x.InitialRequest) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceRequest_ClientStats: + s := proto.Size(x.ClientStats) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceRequest struct { + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. + Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"` + // The total number of RPCs that were dropped by the client because of rate + // limiting. + NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"` + // The total number of RPCs that were dropped by the client because of load + // balancing. + NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. 
+ NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ClientStats) GetTimestamp() *Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForRateLimiting + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForLoadBalancing + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` +} + +func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"` +} +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + } +} + +func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialResponse); err != nil { + return err + } + case *LoadBalanceResponse_ServerList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceResponse) + switch tag { + case 1: // load_balance_response_type.initial_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceResponse) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} + return true, err + case 2: // load_balance_response_type.server_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerList) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + s := proto.Size(x.InitialResponse) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceResponse_ServerList: + s := proto.Size(x.ServerList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. 
+ ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + // An opaque but printable token given to the frontend for each pick. All + // frontend requests for that pick must include the token in its initial + // metadata. The token is used by the backend to verify the request and to + // allow the backend to report load to the gRPC LB system. + // + // Its length is variable but less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for rate limiting. + DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for load balancing. 
+ DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDropForRateLimiting() bool { + if m != nil { + return m.DropForRateLimiting + } + return false +} + +func (m *Server) GetDropForLoadBalancing() bool { + if m != nil { + return m.DropForLoadBalancing + } + return false +} + +func init() { + proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") + proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp") + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b, + 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69, + 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55, + 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28, + 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f, + 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb, + 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56, + 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3, + 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a, + 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18, + 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8, + 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a, + 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc, + 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d, + 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f, + 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42, + 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b, + 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 
0x3d, 0xff, 0xcf, + 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60, + 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3, + 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29, + 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9, + 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1, + 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e, + 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd, + 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a, + 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa, + 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31, + 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a, + 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79, + 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8, + 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89, + 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f, + 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7, + 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a, + 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62, + 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d, + 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77, + 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc, + 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76, + 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b, + 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06, + 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd, + 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86, + 0xa6, 0x4a, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto new file mode 100644 index 00000000000..42d99c109fe --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -0,0 +1,155 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
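+
+// An informative sketch of the exchange encoded by the messages below: the
+// client opens a BalanceLoad stream, sends one LoadBalanceRequest carrying
+// initial_request, receives an initial_response, then reports ClientStats at
+// client_stats_report_interval while the balancer streams updated ServerList
+// messages.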
+ +syntax = "proto3"; + +package grpc.lb.v1; +option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"; + +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} + +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} + +message LoadBalanceRequest { + oneof load_balance_request_type { + // This message should be sent on the first request to the load balancer. + InitialLoadBalanceRequest initial_request = 1; + + // The client stats should be periodically reported to the load balancer + // based on the duration defined in the InitialLoadBalanceResponse. + ClientStats client_stats = 2; + } +} + +message InitialLoadBalanceRequest { + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. + string name = 1; +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +message ClientStats { + // The timestamp of generating the report. + Timestamp timestamp = 1; + + // The total number of RPCs that started. + int64 num_calls_started = 2; + + // The total number of RPCs that finished. + int64 num_calls_finished = 3; + + // The total number of RPCs that were dropped by the client because of rate + // limiting. + int64 num_calls_finished_with_drop_for_rate_limiting = 4; + + // The total number of RPCs that were dropped by the client because of load + // balancing. + int64 num_calls_finished_with_drop_for_load_balancing = 5; + + // The total number of RPCs that failed to reach a server except dropped RPCs. + int64 num_calls_finished_with_client_failed_to_send = 6; + + // The total number of RPCs that finished and are known to have been received + // by a server. + int64 num_calls_finished_known_received = 7; +} + +message LoadBalanceResponse { + oneof load_balance_response_type { + // This message should be sent on the first response to the client. + InitialLoadBalanceResponse initial_response = 1; + + // Contains the list of servers selected by the load balancer. The client + // should send requests to these servers in the specified order. + ServerList server_list = 2; + } +} + +message InitialLoadBalanceResponse { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. 
+ string load_balancer_delegate = 1; + + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. + Duration client_stats_report_interval = 2; +} + +message ServerList { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + repeated Server servers = 1; + + // Was google.protobuf.Duration expiration_interval. + reserved 3; +} + +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. +message Server { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + bytes ip_address = 1; + + // A resolved port number for the server. + int32 port = 2; + + // An opaque but printable token given to the frontend for each pick. All + // frontend requests for that pick must include the token in its initial + // metadata. The token is used by the backend to verify the request and to + // allow the backend to report load to the gRPC LB system. + // + // Its length is variable but less than 50 bytes. + string load_balance_token = 3; + + // Indicates whether this particular request should be dropped by the client + // for rate limiting. + bool drop_for_rate_limiting = 4; + + // Indicates whether this particular request should be dropped by the client + // for load balancing. + bool drop_for_load_balancing = 5; +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000000..16a7d88867e --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport package only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. 
+func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calls os.Exit() with exit code 1. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 00000000000..d03b2397bfa --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// Logger mimics golang's standard Logger as an interface. +// Deprecated: use LoggerV2.
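A usage sketch for the helpers above (not vendored code). With the default logger, Info output appears only when GRPC_GO_LOG_SEVERITY_LEVEL is set to INFO, and a V(2) guard passes only when GRPC_GO_LOG_VERBOSITY_LEVEL is at least 2; the deprecated SetLogger hook below accepts any implementation of the legacy Logger interface, including the standard library's *log.Logger:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Legacy path: *log.Logger satisfies Logger. The wrapper routes every
	// severity through Print and reports true for every verbosity level.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {
	grpclog.Infof("dialing %s", "localhost:50051")
	if grpclog.V(2) {
		grpclog.Infoln("verbose transport details")
	}
}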
+type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000000..d4932577695 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
+ Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) 
+} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..fdcbb9e0b7d --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto + +/* +Package grpc_health_v1 is a generated protocol buffer package. + +It is generated from these files: + grpc_health_v1/health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
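Stepping back to the LoggerV2 constructors above, a configuration sketch. Note how newLoggerV2 selects a minimum severity by leaving the other writers as ioutil.Discard; a custom setup follows the same pattern, since each severity also writes to the lower-severity writers:

package example

import (
	"io/ioutil"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// WARNING is the minimum severity here: INFO is discarded, while WARNING,
	// ERROR and FATAL all reach stderr through the warning writer. With
	// verbosity 2, V(l) reports true for l <= 2.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(ioutil.Discard, os.Stderr, ioutil.Discard, 2))
}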
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_health_v1/health.proto", +} + +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, + 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, + 0x20, 0x60, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto new file mode 100644 index 00000000000..6072fdc3b80 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -0,0 +1,34 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.health.v1; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health { + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 00000000000..c6212f406f7 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto + +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server's overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, grpc.Errorf(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when the serving status of a service needs to be reset, +// or when a new service entry is inserted into the statusMap. +func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000000..06dc825b9fb --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
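A wiring sketch for the health service above; srv is assumed to be an existing *grpc.Server:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func registerHealth(srv *grpc.Server) {
	hs := health.NewServer()
	healthpb.RegisterHealthServer(srv, hs)
	// Clients calling Check for "my.package.MyService" now receive SERVING;
	// an empty service name reports the overall server status.
	hs.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING)
}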
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. 
It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 00000000000..07083832c3c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,34 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 00000000000..f8adc7e6d4f --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. +// Make sure these parameters are set in coordination with the keepalive policy on the server, +// as incompatible settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. + Time time.Duration // The current default value is infinity. 
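Before moving on, a server-side sketch of the unary hook defined in interceptor.go above; the grpc.UnaryInterceptor server option used to install it lives in the wider package and is assumed here:

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

// loggingUnary satisfies UnaryServerInterceptor: it logs the method name,
// then invokes handler itself, as the contract requires.
func loggingUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	grpclog.Infof("unary call: %s", info.FullMethod)
	resp, err := handler(ctx, req)
	if err != nil {
		grpclog.Warningf("unary call %s failed: %v", info.FullMethod, err)
	}
	return resp, err
}

// Installed as: grpc.NewServer(grpc.UnaryInterceptor(loggingUnary))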
+ // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client runs keepalive checks even with no active RPCs. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. + // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. + // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. +// The server will close the connection with a client that violates this policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server expects keepalive pings even when there are no active streams (RPCs). + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 00000000000..ccfea5d4530 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,137 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata defines the structure of the metadata supported by the gRPC library. +// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" +) + +// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
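Back to the keepalive parameters above: a client-side configuration sketch; grpc.WithKeepaliveParams and grpc.WithInsecure are assumed from the wider package:

package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func dial(addr string) (*grpc.ClientConn, error) {
	return grpc.Dial(addr,
		grpc.WithInsecure(), // plaintext, for the sketch only
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                5 * time.Minute,  // ping after 5m without activity
			Timeout:             20 * time.Second, // wait 20s for the ping ack
			PermitWithoutStream: true,             // ping even with no active RPCs
		}))
}

Note the interaction with EnforcementPolicy above: a Time below the server's MinTime (5 minutes by default), or pings without active streams when the server has not set PermitWithoutStream, count as policy violations and can get the connection closed.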
+func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, md) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to the copies of the returned MD. 
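A round-trip sketch using the helpers above: the client attaches outgoing metadata, the server reads it back and copies before mutating, per the FromIncomingContext note:

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

func clientCtx(ctx context.Context) context.Context {
	// Keys are lowercased by Pairs; "grpc-" prefixed keys are reserved.
	md := metadata.Pairs(
		"authorization", "Bearer <token>",
		"x-request-id", "42",
	)
	return metadata.NewOutgoingContext(ctx, md)
}

func serverSide(ctx context.Context) metadata.MD {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return metadata.MD{}
	}
	return md.Copy() // safe to mutate the copy, not the original
}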
+func FromOutgoingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdOutgoingKey{}).(MD) + return +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 00000000000..7e69a2ca0a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "errors" + "fmt" + "net" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. +func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme. +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and the host name is enclosed in square brackets, the brackets +// are stripped when setting the host.
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6 (without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty (target ends with a colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. +func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target. +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For an IP address, +// the resolution is the address itself, so polling the name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will block +// on all following calls, as no Update exists until the watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication.
+ ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list. +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If no balancer address was found (either there is no corresponding SRV record + // for the target, or resolution/parsing of the balancer target failed), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update (delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 00000000000..a537b08c602 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
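A polling sketch for the DNS resolver above; the Update, Resolver and Watcher types are defined in naming.go, shown after this file:

package example

import (
	"time"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/naming"
)

func watch(target string) error {
	r, err := naming.NewDNSResolverWithFreq(time.Minute)
	if err != nil {
		return err
	}
	w, err := r.Resolve(target) // e.g. "my-service.example.com:50051"
	if err != nil {
		return err
	}
	defer w.Close()
	for {
		// The first call returns the full resolved set; later calls block
		// until the set changes or the watcher is closed.
		updates, err := w.Next()
		if err != nil {
			return err // errWatcherClose once the watcher is closed
		}
		for _, u := range updates {
			grpclog.Infof("resolver update: op=%v addr=%s", u.Op, u.Addr)
		}
	}
}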
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go new file mode 100644 index 00000000000..b5a0f842748 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -0,0 +1,28 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 00000000000..1af7e32f86d --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// The interface is EXPERIMENTAL and may be subject to change. +package naming + +// Operation defines the corresponding operations for a name resolution change. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid to have both +// an empty string Addr and nil Metadata in an Update. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is the empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results.
It should + return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 00000000000..317b8b9d09a --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 00000000000..9085dbc9c98 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblocks when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
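Returning to the peer package above, a handler-side sketch:

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/peer"
)

// logPeer records who is calling; AuthInfo is nil without transport security.
func logPeer(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		grpclog.Infof("request from %v (auth: %v)", p.Addr, p.AuthInfo)
	}
}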
+func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. + bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, put, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + return t, put, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000000..e4597cb86c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return "pickfirst" +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(addrs) + b.sc.Connect() + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + return + } + if s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 00000000000..2d40236e218 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "golang.org/x/net/context" +) + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. 
+ httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (string, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return "", err + } + if url == nil { + return "", errDisabled + } + return url.Host, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := (&http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: addr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + }) + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var skipHandshake bool + newAddr, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + skipHandshake = true + newAddr = addr + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if !skipHandshake { + conn, err = doHTTPConnectHandshake(ctx, conn, addr) + } + return + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 00000000000..a543a709a62 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,377 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+func init() {
+ resolver.Register(NewBuilder())
+}
+
+const (
+ defaultPort = "443"
+ defaultFreq = time.Minute * 30
+ golang = "GO"
+ // In DNS, service config is encoded in a TXT record via the mechanism
+ // described in RFC-1464 using the attribute name grpc_config.
+ txtAttribute = "grpc_config="
+)
+
+var errMissingAddr = errors.New("missing address")
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+ return &dnsBuilder{freq: defaultFreq}
+}
+
+type dnsBuilder struct {
+ // frequency of polling the DNS server.
+ freq time.Duration
+}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+ host, port, err := parseTarget(target.Endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ // IP address.
+ if net.ParseIP(host) != nil {
+ host, _ = formatIP(host)
+ addr := []resolver.Address{{Addr: host + ":" + port}}
+ i := &ipResolver{
+ cc: cc,
+ ip: addr,
+ rn: make(chan struct{}, 1),
+ q: make(chan struct{}),
+ }
+ cc.NewAddress(addr)
+ go i.watcher()
+ return i, nil
+ }
+
+ // DNS address (non-IP).
+ ctx, cancel := context.WithCancel(context.Background())
+ d := &dnsResolver{
+ freq: b.freq,
+ host: host,
+ port: port,
+ ctx: ctx,
+ cancel: cancel,
+ cc: cc,
+ t: time.NewTimer(0),
+ rn: make(chan struct{}, 1),
+ }
+
+ d.wg.Add(1)
+ go d.watcher()
+ return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+ return "dns"
+}
+
+// ipResolver watches for the name resolution update for an IP address.
+type ipResolver struct {
+ cc resolver.ClientConn
+ ip []resolver.Address
+ // rn channel is used by ResolveNow() to force an immediate resolution of the target.
+ rn chan struct{}
+ q chan struct{}
+}
+
+// ResolveNow resends the address it stores; no resolution is needed.
+func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
+ select {
+ case i.rn <- struct{}{}:
+ default:
+ }
+}
+
+// Close closes the ipResolver.
+func (i *ipResolver) Close() {
+ close(i.q)
+}
+
+func (i *ipResolver) watcher() {
+ for {
+ select {
+ case <-i.rn:
+ i.cc.NewAddress(i.ip)
+ case <-i.q:
+ return
+ }
+ }
+}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+ freq time.Duration
+ host string
+ port string
+ ctx context.Context
+ cancel context.CancelFunc
+ cc resolver.ClientConn
+ // rn channel is used by ResolveNow() to force an immediate resolution of the target.
+ rn chan struct{}
+ t *time.Timer
+ // wg is used to enforce Close() to return after the watcher() goroutine has finished.
+ // Otherwise, a data race would be possible. [Race Example] in dns_resolver_test we
+ // replace the real lookup functions with mocked ones to facilitate testing.
+ // If Close() didn't wait for the watcher() goroutine to finish, the race detector
+ // would sometimes warn that the lookup inside the watcher() goroutine (a READ of
+ // the lookup function pointers) races with replaceNetFunc (a WRITE of the same pointers).
+ wg sync.WaitGroup
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+ select {
+ case d.rn <- struct{}{}:
+ default:
+ }
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+ d.cancel()
+ d.wg.Wait()
+ d.t.Stop()
+}
+
+func (d *dnsResolver) watcher() {
+ defer d.wg.Done()
+ for {
+ select {
+ case <-d.ctx.Done():
+ return
+ case <-d.t.C:
+ case <-d.rn:
+ }
+ result, sc := d.lookup()
+ // Next lookup should happen after an interval defined by d.freq.
+ d.t.Reset(d.freq)
+ d.cc.NewServiceConfig(string(sc))
+ d.cc.NewAddress(result)
+ }
+}
+
+func (d *dnsResolver) lookupSRV() []resolver.Address {
+ var newAddrs []resolver.Address
+ _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
+ if err != nil {
+ grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+ return nil
+ }
+ for _, s := range srvs {
+ lbAddrs, err := lookupHost(d.ctx, s.Target)
+ if err != nil {
+ grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
+ continue
+ }
+ for _, a := range lbAddrs {
+ a, ok := formatIP(a)
+ if !ok {
+ grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ continue
+ }
+ addr := a + ":" + strconv.Itoa(int(s.Port))
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+ }
+ }
+ return newAddrs
+}
+
+func (d *dnsResolver) lookupTXT() string {
+ ss, err := lookupTXT(d.ctx, d.host)
+ if err != nil {
+ grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
+ return ""
+ }
+ var res string
+ for _, s := range ss {
+ res += s
+ }
+
+ // TXT record must have "grpc_config=" attribute in order to be used as service config.
+ if !strings.HasPrefix(res, txtAttribute) {
+ grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
+ return ""
+ }
+ return strings.TrimPrefix(res, txtAttribute)
+}
+
+func (d *dnsResolver) lookupHost() []resolver.Address {
+ var newAddrs []resolver.Address
+ addrs, err := lookupHost(d.ctx, d.host)
+ if err != nil {
+ grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+ return nil
+ }
+ for _, a := range addrs {
+ a, ok := formatIP(a)
+ if !ok {
+ grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ continue
+ }
+ addr := a + ":" + d.port
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+ }
+ return newAddrs
+}
+
+func (d *dnsResolver) lookup() ([]resolver.Address, string) {
+ var newAddrs []resolver.Address
+ newAddrs = d.lookupSRV()
+ // Support fallback to non-balancer address.
+ newAddrs = append(newAddrs, d.lookupHost()...)
+ sc := d.lookupTXT()
+ return newAddrs, canaryingSC(sc)
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return "", false
+ }
+ if ip.To4() != nil {
+ return addr, true
+ }
+ return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and the host-name is enclosed in square brackets, the
+// brackets are stripped when setting the host.
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + if r.Intn(100)+1 > *a { + return false + } + return true +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 00000000000..b466bc8f6d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go new file mode 100644 index 00000000000..fa34f14cad4 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -0,0 +1,29 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT +) diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 00000000000..b76010d74d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
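+//
+// Editor's aside (not part of the vendored gRPC source): with this builder
+// registered, a target such as "passthrough:///localhost:50051" reaches the
+// connection logic unmodified. A hedged sketch, assuming the grpc.Dial and
+// grpc.WithInsecure APIs shipped with this gRPC version:
+//
+//    conn, err := grpc.Dial("passthrough:///localhost:50051", grpc.WithInsecure())
+//    if err != nil {
+//        log.Fatalf("dial: %v", err)
+//    }
+//    defer conn.Close()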
+package passthrough
+
+import "google.golang.org/grpc/resolver"
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+ r := &passthroughResolver{
+ target: target,
+ cc: cc,
+ }
+ r.start()
+ return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+ return scheme
+}
+
+type passthroughResolver struct {
+ target resolver.Target
+ cc resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+ r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
+}
+
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+ resolver.Register(&passthroughBuilder{})
+}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 00000000000..0dd887fa543
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,143 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+var (
+ // m is a map from scheme to resolver builder.
+ m = make(map[string]Builder)
+ // defaultScheme is the default scheme to use.
+ defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map.
+// b.Scheme will be used as the scheme registered with this builder.
+func Register(b Builder) {
+ m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+// If no builder is registered with the scheme, the default scheme will
+// be used.
+// If the default scheme is not modified, "dns" will be the default
+// scheme, and the preinstalled dns resolver will be used.
+// If the default scheme is modified, and a resolver is registered with
+// the scheme, that resolver will be returned.
+// If the default scheme is modified, and no resolver is registered with
+// the scheme, nil will be returned.
+func Get(scheme string) Builder {
+ if b, ok := m[scheme]; ok {
+ return b
+ }
+ if b, ok := m[defaultScheme]; ok {
+ return b
+ }
+ return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used.
+// If it is never called, the default scheme is "dns".
func SetDefaultScheme(scheme string) {
+ defaultScheme = scheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+ // Backend indicates the address is for a backend server.
+ Backend AddressType = iota
+ // GRPCLB indicates the address is for a grpclb load balancer.
+ GRPCLB
+)
+
+// Address represents a server the client connects to.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
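+//
+// Editor's aside (not part of the vendored gRPC source): a minimal sketch of
+// how a custom builder plugs into Register/Get above; exampleBuilder and
+// exampleResolver are hypothetical names, everything else is from this package:
+//
+//    type exampleBuilder struct{}
+//
+//    func (*exampleBuilder) Scheme() string { return "example" }
+//    func (*exampleBuilder) Build(t Target, cc ClientConn, o BuildOption) (Resolver, error) {
+//        cc.NewAddress([]Address{{Addr: t.Endpoint}}) // report one static address
+//        return &exampleResolver{cc: cc}, nil
+//    }
+//
+//    func init() { Register(&exampleBuilder{}) }
+//
+// The Address struct below is the value such a resolver reports via NewAddress.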
+type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // It's the name of the grpc load balancer, which will be used for authentication. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +type ClientConn interface { + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name again. + // It's just a hint, resolver can ignore this if it's not necessary. + ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000000..c07e174a849 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements the resolver.ClientConn interface.
+type ccResolverWrapper struct {
+ cc *ClientConn
+ resolver resolver.Resolver
+ addrCh chan []resolver.Address
+ scCh chan string
+ done chan struct{}
+}
+
+// split2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns ("", s, false) instead.
+func split2(s, sep string) (string, string, bool) {
+ spl := strings.SplitN(s, sep, 2)
+ if len(spl) < 2 {
+ return "", "", false
+ }
+ return spl[0], spl[1], true
+}
+
+// parseTarget splits target into a struct containing scheme, authority and
+// endpoint.
+func parseTarget(target string) (ret resolver.Target) {
+ var ok bool
+ ret.Scheme, ret.Endpoint, ok = split2(target, "://")
+ if !ok {
+ return resolver.Target{Endpoint: target}
+ }
+ ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
+ return ret
+}
+
+// newCCResolverWrapper parses cc.target for scheme and gets the resolver
+// builder for this scheme. It then builds the resolver and starts the
+// monitoring goroutine for it.
+func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
+ grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme)
+
+ rb := resolver.Get(cc.parsedTarget.Scheme)
+ if rb == nil {
+ return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+ }
+
+ ccr := &ccResolverWrapper{
+ cc: cc,
+ addrCh: make(chan []resolver.Address, 1),
+ scCh: make(chan string, 1),
+ done: make(chan struct{}),
+ }
+
+ var err error
+ ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{})
+ if err != nil {
+ return nil, err
+ }
+ go ccr.watcher()
+ return ccr, nil
+}
+
+// watcher processes address updates and service config updates sequentially;
+// otherwise we would need to resolve possible races between address and
+// service config updates (e.g. when they specify different balancer types).
+func (ccr *ccResolverWrapper) watcher() {
+ for {
+ select {
+ case <-ccr.done:
+ return
+ default:
+ }
+
+ select {
+ case addrs := <-ccr.addrCh:
+ select {
+ case <-ccr.done:
+ return
+ default:
+ }
+ grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
+ ccr.cc.handleResolvedAddrs(addrs, nil)
+ case sc := <-ccr.scCh:
+ select {
+ case <-ccr.done:
+ return
+ default:
+ }
+ grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
+ ccr.cc.handleServiceConfig(sc)
+ case <-ccr.done:
+ return
+ }
+ }
+}
+
+func (ccr *ccResolverWrapper) close() {
+ ccr.resolver.Close()
+ close(ccr.done)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+ select {
+ case <-ccr.addrCh:
+ default:
+ }
+ ccr.addrCh <- addrs
+}
+
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+ select {
+ case <-ccr.scCh:
+ default:
+ }
+ ccr.scCh <- sc
+}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 00000000000..e006b7e63cf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,459 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "io" + "io/ioutil" + "math" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// Compressor defines the interface gRPC uses to compress a message. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +func NewGZIPCompressor() Compressor { + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(ioutil.Discard) + }, + }, + } +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + failFast bool + headerMD metadata.MD + trailerMD metadata.MD + peer *peer.Peer + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials +} + +func defaultCallInfo() *callInfo { + return &callInfo{failFast: true} +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. 
after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +type beforeCall func(c *callInfo) error + +func (o beforeCall) before(c *callInfo) error { return o(c) } +func (o beforeCall) after(c *callInfo) {} + +type afterCall func(c *callInfo) + +func (o afterCall) before(c *callInfo) error { return nil } +func (o afterCall) after(c *callInfo) { o(c) } + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return afterCall(func(c *callInfo) { + *md = c.headerMD + }) +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return afterCall(func(c *callInfo) { + *md = c.trailerMD + }) +} + +// Peer returns a CallOption that retrieves peer information for a +// unary RPC. +func Peer(peer *peer.Peer) CallOption { + return afterCall(func(c *callInfo) { + if c.peer != nil { + *peer = *c.peer + } + }) +} + +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failFast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out). Please refer +// to https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// The default behavior of RPCs is to fail fast. +func FailFast(failFast bool) CallOption { + return beforeCall(func(c *callInfo) error { + c.failFast = failFast + return nil + }) +} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxReceiveMessageSize = &s + return nil + }) +} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return beforeCall(func(o *callInfo) error { + o.maxSendMessageSize = &s + return nil + }) +} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return beforeCall(func(c *callInfo) error { + c.creds = creds + return nil + }) +} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = iota // no compression + compressionMade +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail + // at https://grpc.io/docs/guides/wire.html. + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. 
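+//
+// Editor's aside (not part of the vendored gRPC source): the header parsed
+// here is the 5-byte gRPC message frame, one compressed-flag byte followed by
+// a 4-byte big-endian payload length, mirroring encode() below. Framing a
+// payload by hand would look like this sketch (payload is a hypothetical
+// []byte):
+//
+//    hdr := make([]byte, 5)
+//    hdr[0] = byte(compressionNone) // 0: uncompressed payload
+//    binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
+//    frame := append(hdr, payload...)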
+// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if length > uint32(maxReceiveMessageSize) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer of message header and a buffer of msg. +// If msg is nil, it generates the message header and an empty msg buffer. +func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, []byte, error) { + var b []byte + const ( + payloadLen = 1 + sizeLen = 4 + ) + + if msg != nil { + var err error + b, err = c.Marshal(msg) + if err != nil { + return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if outPayload != nil { + outPayload.Payload = msg + // TODO truncate large payload. + outPayload.Data = b + outPayload.Length = len(b) + } + if cp != nil { + if err := cp.Do(cbuf, b); err != nil { + return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + b = cbuf.Bytes() + } + } + + if uint(len(b)) > math.MaxUint32 { + return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + + bufHeader := make([]byte, payloadLen+sizeLen) + if cp == nil { + bufHeader[0] = byte(compressionNone) + } else { + bufHeader[0] = byte(compressionMade) + } + // Write length of b into buf + binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) + if outPayload != nil { + outPayload.WireLength = payloadLen + sizeLen + len(b) + } + return bufHeader, b, nil +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { + switch pf { + case compressionNone: + case compressionMade: + if dc == nil || recvCompress != dc.Type() { + return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return err + } + if inPayload != nil { + inPayload.WireLength = len(d) + } + if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { + return err + } + if pf == compressionMade { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + if len(d) > maxReceiveMessageSize { + // TODO: Revisit the error code. 
Currently keep it consistent with java + // implementation. + return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + } + if err := c.Unmarshal(d, m); err != nil { + return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if inPayload != nil { + inPayload.RecvTime = time.Now() + inPayload.Payload = m + // TODO truncate large payload. + inPayload.Data = d + inPayload.Length = len(d) + } + return nil +} + +type rpcInfo struct { + failfast bool + bytesSent bool + bytesReceived bool +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +func updateRPCInfoInContext(ctx context.Context, s rpcInfo) { + if ss, ok := rpcInfoFromContext(ctx); ok { + ss.bytesReceived = s.bytesReceived + ss.bytesSent = s.bytesSent + } + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated; use status.FromError and Code method instead. +func Code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated; use status.FromError and Message method instead. +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated; use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 5. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true +) + +// Version is the current grpc version. +const Version = "1.8.0-dev" + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 00000000000..def301a19a3 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1210 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/trace" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" + "google.golang.org/grpc/transport" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + serve bool + drain bool + ctx context.Context + cancel context.CancelFunc + // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished + // and all the transport goes away. + cv *sync.Cond + m map[string]*service // service name -> service info + events trace.EventLog + + quit chan struct{} + done chan struct{} + quitOnce sync.Once + doneOnce sync.Once +} + +type options struct { + creds credentials.TransportCredentials + codec Codec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int +} + +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption func(*options) + +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. 
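+//
+// Editor's aside (not part of the vendored gRPC source): each ServerOption in
+// this file is a function that mutates the private options struct, so callers
+// compose them at construction time. A hedged sketch using only options
+// defined in this file:
+//
+//    s := grpc.NewServer(
+//        grpc.WriteBufferSize(64<<10),   // batch up to 64KiB before a write
+//        grpc.MaxRecvMsgSize(8<<20),     // raise the default 4MB receive cap
+//        grpc.MaxConcurrentStreams(128), // bound streams per transport
+//    )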
+func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + return func(o *options) { + o.keepaliveParams = kp + } +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return func(o *options) { + o.keepalivePolicy = kep + } +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +func CustomCodec(codec Codec) ServerOption { + return func(o *options) { + o.codec = codec + } +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return func(o *options) { + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m + } +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return func(o *options) { + o.maxConcurrentStreams = n + } +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. 
Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + } +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return func(o *options) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + } +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return func(o *options) { + o.statsHandler = h + } +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation bypasses interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return func(o *options) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o(&opts) + } + if opts.codec == nil { + // Set the default codec. + opts.codec = protoCodec{} + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), + } + s.cv = sync.NewCond(&s.mu) + s.ctx, s.cancel = context.WithCancel(context.Background()) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
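+//
+// Editor's aside (not part of the vendored gRPC source): RegisterService is
+// normally reached through the generated RegisterFooServer helper rather than
+// called directly; pb and greeterServer are hypothetical names:
+//
+//    s := grpc.NewServer()
+//    pb.RegisterGreeterServer(s, &greeterServer{}) // generated wrapper around s.RegisterService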
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+ ht := reflect.TypeOf(sd.HandlerType).Elem()
+ st := reflect.TypeOf(ss)
+ if !st.Implements(ht) {
+ grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ }
+ s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.printf("RegisterService(%q)", sd.ServiceName)
+ if s.serve {
+ grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+ }
+ if _, ok := s.m[sd.ServiceName]; ok {
+ grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+ }
+ srv := &service{
+ server: ss,
+ md: make(map[string]*MethodDesc),
+ sd: make(map[string]*StreamDesc),
+ mdata: sd.Metadata,
+ }
+ for i := range sd.Methods {
+ d := &sd.Methods[i]
+ srv.md[d.MethodName] = d
+ }
+ for i := range sd.Streams {
+ d := &sd.Streams[i]
+ srv.sd[d.StreamName] = d
+ }
+ s.m[sd.ServiceName] = srv
+}
+
+// MethodInfo contains the information of an RPC including its method name and type.
+type MethodInfo struct {
+ // Name is the method name only, without the service name or package name.
+ Name string
+ // IsClientStream indicates whether the RPC is a client streaming RPC.
+ IsClientStream bool
+ // IsServerStream indicates whether the RPC is a server streaming RPC.
+ IsServerStream bool
+}
+
+// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
+type ServiceInfo struct {
+ Methods []MethodInfo
+ // Metadata is the metadata specified in ServiceDesc when registering service.
+ Metadata interface{}
+}
+
+// GetServiceInfo returns a map from service names to ServiceInfo.
+// Service names include the package names, in the form of <package>.<service>.
+func (s *Server) GetServiceInfo() map[string]ServiceInfo {
+ ret := make(map[string]ServiceInfo)
+ for n, srv := range s.m {
+ methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
+ for m := range srv.md {
+ methods = append(methods, MethodInfo{
+ Name: m,
+ IsClientStream: false,
+ IsServerStream: false,
+ })
+ }
+ for m, d := range srv.sd {
+ methods = append(methods, MethodInfo{
+ Name: m,
+ IsClientStream: d.ClientStreams,
+ IsServerStream: d.ServerStreams,
+ })
+ }
+
+ ret[n] = ServiceInfo{
+ Methods: methods,
+ Metadata: srv.mdata,
+ }
+ }
+ return ret
+}
+
+// ErrServerStopped indicates that the operation is now illegal because of
+// the server being stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
+
+func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ if s.opts.creds == nil {
+ return rawConn, nil, nil
+ }
+ return s.opts.creds.ServerHandshake(rawConn)
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
+// this method returns.
+// Serve always returns non-nil error.
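Once services are registered, GetServiceInfo above makes the method table inspectable. A small diagnostic sketch; the package name, function name, and output format are assumptions:

package diag

import (
	"fmt"

	"google.golang.org/grpc"
)

// DumpServices prints every registered method of s together with its
// streaming kind; purely a diagnostic sketch.
func DumpServices(s *grpc.Server) {
	for name, info := range s.GetServiceInfo() {
		for _, m := range info.Methods {
			fmt.Printf("%s/%s client-stream=%v server-stream=%v\n",
				name, m.Name, m.IsClientStream, m.IsServerStream)
		}
	}
}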
+func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + s.lis[lis] = true + s.mu.Unlock() + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[lis] { + lis.Close() + delete(s.lis, lis) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.ctx.Done(): + } + timer.Stop() + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + // If Stop or GracefulStop is called, block until they are done and return nil + select { + case <-s.quit: + <-s.done + return nil + default: + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn + // so we don't stall this Accept loop goroutine. + go s.handleRawConn(rawConn) + } +} + +// handleRawConn is run in its own goroutine and handles a just-accepted +// connection that has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandShake returns ErrConnDispatched, keep rawConn open. + if err != credentials.ErrConnDispatched { + rawConn.Close() + } + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + if s.opts.useHandlerImpl { + s.serveUsingHandler(conn) + } else { + s.serveHTTP2Transport(conn, authInfo) + } +} + +// serveHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go) and +// serves streams on it. +// This is run in its own goroutine (it does network I/O in +// transport.NewServerTransport). 
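The Serve loop above backs off on temporary Accept errors, doubling the delay from 5ms and capping it at one second. A standalone sketch of the same schedule; the bound of ten iterations is arbitrary:

package main

import (
	"fmt"
	"time"
)

func main() {
	var tempDelay time.Duration
	for i := 0; i < 10; i++ {
		if tempDelay == 0 {
			tempDelay = 5 * time.Millisecond
		} else {
			tempDelay *= 2
		}
		if max := 1 * time.Second; tempDelay > max {
			tempDelay = max
		}
		// Prints 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1s, 1s.
		fmt.Println(tempDelay)
	}
}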
+func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return + } + if !s.addConn(st) { + st.Close() + return + } + s.serveStreams(st) +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer s.removeConn(st) + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + if !s.addConn(conn) { + conn.Close() + return + } + defer s.removeConn(conn) + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. 
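Expanding the handler snippet in the ServeHTTP comment above into a complete program; the certificate paths, the port, and the mux contents are assumptions:

package main

import (
	"log"
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	// Generated service registrations go here.

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("plain HTTP\n"))
	})

	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
		} else {
			mux.ServeHTTP(w, r)
		}
	})

	// The standard library negotiates HTTP/2 only over TLS, so this
	// port-sharing path needs certificates.
	log.Fatal(http.ListenAndServeTLS(":443", "server.crt", "server.key", root))
}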
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + st.Close() + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil || s.drain { + return false + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + s.cv.Broadcast() + } +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { + var ( + cbuf *bytes.Buffer + outPayload *stats.OutPayload + ) + if cp != nil { + cbuf = new(bytes.Buffer) + } + if s.opts.statsHandler != nil { + outPayload = &stats.OutPayload{} + } + hdr, data, err := encode(s.opts.codec, msg, cp, cbuf, outPayload) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + if len(data) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) + if err == nil && outPayload != nil { + outPayload.SentTime = time.Now() + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil { + begin := &stats.Begin{ + BeginTime: time.Now(), + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + if s.opts.cp != nil { + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + stream.SetSendCompress(s.opts.cp.Type()) + } + p := &parser{r: stream} + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
+ return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + + if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return err + } + if e := t.WriteStatus(stream, status.New(codes.Internal, err.Error())); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + + // TODO checkRecvPayload always return RPC error. Add a return here if necessary. + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + req, err = s.opts.dc.Do(bytes.NewReader(req)) + if err != nil { + return Errorf(codes.Internal, err.Error()) + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(convertCode(appErr), appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + return err + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil { + begin := &stats.Begin{ + BeginTime: time.Now(), + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if s.opts.cp != nil { + stream.SetSendCompress(s.opts.cp.Type()) + } + ss := &serverStream{ + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + switch err := appErr.(type) { + case transport.StreamError: + appStatus = status.New(err.Code, err.Desc) + default: + appStatus = status.New(convertCode(appErr), appErr.Error()) + } + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, status.New(codes.OK, "")) + +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("unknown service %v", service) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + errDesc := fmt.Sprintf("unknown method %v", method) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + s.cancel() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. 
It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + return + } + for lis := range s.lis { + lis.Close() + } + s.lis = nil + s.cancel() + if !s.drain { + for c := range s.conns { + c.(transport.ServerTransport).Drain() + } + s.drain = true + } + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { + s.mu.Lock() + for c := range s.conns { + c.Close() + delete(s.conns, c) + } + s.mu.Unlock() +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + t := stream.ServerTransport() + if t == nil { + grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + } + if err := t.WriteHeader(stream, md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream, ok := transport.StreamFromContext(ctx) + if !ok { + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 00000000000..0631e7d2bb8 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,185 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "time" + + "google.golang.org/grpc/grpclog" +) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// DEPRECATED: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// DEPRECATED: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + // LB is the load balancer the service providers recommends. The balancer specified + // via grpc.WithBalancer will override this. + LB *string + // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. + Methods map[string]MethodConfig +} + +func parseTimeout(t *string) (*time.Duration, error) { + if t == nil { + return nil, nil + } + d, err := time.ParseDuration(*t) + return &d, err +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int + MaxResponseMessageBytes *int +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
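For reference, a sketch of the JSON that these intermediate structs decode (encoding/json matches the field names case-insensitively); the service and method names are invented:

const exampleServiceConfig = `{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [
      {"service": "helloworld.Greeter", "method": "SayHello"},
      {"service": "helloworld.Greeter"}
    ],
    "waitForReady": true,
    "timeout": "1s",
    "maxRequestMessageBytes": 1048576,
    "maxResponseMessageBytes": 1048576
  }]
}`

Per generatePath above, the first name entry maps to the exact path /helloworld.Greeter/SayHello, while the second, method-less entry becomes the per-service default path /helloworld.Greeter/.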
+type jsonSC struct { + LoadBalancingPolicy *string + MethodConfig *[]jsonMC +} + +func parseServiceConfig(js string) (ServiceConfig, error) { + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + } + if rsc.MethodConfig == nil { + return sc, nil + } + + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseTimeout(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + MaxReqSize: m.MaxRequestMessageBytes, + MaxRespSize: m.MaxResponseMessageBytes, + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + return sc, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newBool(b bool) *bool { + return &b +} + +func newInt(b int) *int { + return &b +} + +func newDuration(b time.Duration) *time.Duration { + return &b +} + +func newString(b string) *string { + return &b +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 00000000000..05b384c6931 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "net" + + "golang.org/x/net/context" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. 
+ HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 00000000000..d5aa2f793bf --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,294 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. 
+ WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // EndTime is the time when the RPC ends. + EndTime time.Time + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. 
+ IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
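A minimal sketch of implementing the stats.Handler interface from handlers.go above; logging only RPC End events, and the log format, are assumptions:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// loggingHandler satisfies stats.Handler and logs when each RPC finishes.
type loggingHandler struct{}

func (loggingHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		log.Printf("rpc finished (client side=%v, err=%v)", end.IsClient(), end.Error)
	}
}

func (loggingHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return ctx
}

func (loggingHandler) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
	// Installed server-side via the StatsHandler ServerOption.
	_ = grpc.NewServer(grpc.StatsHandler(loggingHandler{}))
}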
+func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 00000000000..871dc4b31c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,168 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) status() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. 
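A short sketch of the intended round trip: handlers return errors built by this package, and callers unpack them with FromError. The function and messages are invented:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// lookup stands in for a server handler; gRPC handlers should return
// errors created by the status package.
func lookup(id string) error {
	if id == "" {
		return status.Errorf(codes.InvalidArgument, "empty id")
	}
	return nil
}

func main() {
	err := lookup("")
	// Clients (or tests) recover the code and message with FromError.
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message()) // InvalidArgument empty id
	}
}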
+func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package, otherwise it returns nil, false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + } + if s, ok := err.(*statusError); ok { + return s.status(), true + } + return nil, false +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 00000000000..cfdb5bfbb9b --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,665 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "errors" + "io" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +type Stream interface { + // Context returns the context for this stream. + Context() context.Context + // SendMsg blocks until it sends m, the stream is done or the stream + // breaks. + // On error, it aborts the stream and returns an RPC status on client + // side. On server side, it simply returns the error to the caller. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message or the stream is + // done. On client side, it returns io.EOF when the stream is done. On + // any other error, it aborts the stream and returns an RPC status. On + // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientStream defines the interface a client stream has to satisfy. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. + CloseSend() error + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. + // Always call Stream.RecvMsg() to get the final status if you care about the status of + // the RPC. + Stream +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream creates a new Stream for the client side. 
This is typically +// called by generated code. +// +// DEPRECATED: Use ClientConn.NewStream instead. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + var ( + t transport.ClientTransport + s *transport.Stream + done func(balancer.DoneInfo) + cancel context.CancelFunc + ) + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + if mc.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + defer func() { + if err != nil { + cancel() + } + }() + } + + opts = append(cc.dopts.callOptions, opts...) + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. + Flush: desc.ClientStreams, + } + if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo traceInfo + if EnableTracing { + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + defer func() { + if err != nil { + // Need to call tr.finish() if error is returned. + // Because tr will not be returned to caller. + trInfo.tr.LazyPrintf("RPC: [%v]", err) + trInfo.tr.SetError() + trInfo.tr.Finish() + } + }() + } + ctx = newContextWithRPCInfo(ctx, c.failFast) + sh := cc.dopts.copts.StatsHandler + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + begin := &stats.Begin{ + Client: true, + BeginTime: time.Now(), + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + } + sh.HandleRPC(ctx, end) + } + }() + } + for { + t, done, err = cc.getTransport(ctx, c.failFast) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := status.FromError(err); ok { + return nil, err + } + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return nil, Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return nil, Errorf(codes.Internal, "%v", err) + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if _, ok := err.(transport.ConnectionError); ok && done != nil { + // If error is connection error, transport was sending data on wire, + // and we are not sure if anything has been sent on wire. + // If error is not connection error, we are sure nothing has been sent. 
+ updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) + } + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil + } + if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { + continue + } + return nil, toRPCErr(err) + } + break + } + // Set callInfo.peer object from stream's context. + if peer, ok := peer.FromContext(s.Context()); ok { + c.peer = peer + } + cs := &clientStream{ + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + cancel: cancel, + + done: done, + t: t, + s: s, + p: &parser{r: s}, + + tracing: EnableTracing, + trInfo: trInfo, + + statsCtx: ctx, + statsHandler: cc.dopts.copts.StatsHandler, + } + // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination + // when there is no pending I/O operations on this stream. + go func() { + select { + case <-t.Error(): + // Incur transport error, simply exit. + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + cs.closeTransportStream(ErrClientConnClosing) + case <-s.Done(): + // TODO: The trace of the RPC is terminated here when there is no pending + // I/O, which is probably not the optimal solution. + cs.finish(s.Status().Err()) + cs.closeTransportStream(nil) + case <-s.GoAway(): + cs.finish(errConnDrain) + cs.closeTransportStream(errConnDrain) + case <-s.Context().Done(): + err := s.Context().Err() + cs.finish(err) + cs.closeTransportStream(transport.ContextErr(err)) + } + }() + return cs, nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + opts []CallOption + c *callInfo + t transport.ClientTransport + s *transport.Stream + p *parser + desc *StreamDesc + codec Codec + cp Compressor + dc Decompressor + cancel context.CancelFunc + + tracing bool // set to EnableTracing when the clientStream is created. + + mu sync.Mutex + done func(balancer.DoneInfo) + closed bool + finished bool + // trInfo.tr is set when the clientStream is created (if EnableTracing is true), + // and is set to nil when the clientStream's finish method is called. + trInfo traceInfo + + // statsCtx keeps the user context for stats handling. + // All stats collection should use the statsCtx (instead of the stream context) + // so that all the generated stats for a particular RPC can be associated in the processing phase. + statsCtx context.Context + statsHandler stats.Handler +} + +func (cs *clientStream) Context() context.Context { + return cs.s.Context() +} + +func (cs *clientStream) Header() (metadata.MD, error) { + m, err := cs.s.Header() + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + return cs.s.Trailer() +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + if cs.tracing { + cs.mu.Lock() + if cs.trInfo.tr != nil { + cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + cs.mu.Unlock() + } + // TODO Investigate how to signal the stats handling party. + // generate error stats if err != nil && err != io.EOF? + defer func() { + if err != nil { + cs.finish(err) + } + if err == nil { + return + } + if err == io.EOF { + // Specialize the process for server streaming. SendMsg is only called + // once when creating the stream object. io.EOF needs to be skipped when + // the rpc is early finished (before the stream object is created.). + // TODO: It is probably better to move this into the generated code. 
+			if !cs.desc.ClientStreams && cs.desc.ServerStreams {
+				err = nil
+			}
+			return
+		}
+		if _, ok := err.(transport.ConnectionError); !ok {
+			cs.closeTransportStream(err)
+		}
+		err = toRPCErr(err)
+	}()
+	var outPayload *stats.OutPayload
+	if cs.statsHandler != nil {
+		outPayload = &stats.OutPayload{
+			Client: true,
+		}
+	}
+	hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
+	if err != nil {
+		return err
+	}
+	if cs.c.maxSendMessageSize == nil {
+		return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
+	}
+	if len(data) > *cs.c.maxSendMessageSize {
+		return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
+	}
+	err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
+	if err == nil && outPayload != nil {
+		outPayload.SentTime = time.Now()
+		cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
+	}
+	return err
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+	var inPayload *stats.InPayload
+	if cs.statsHandler != nil {
+		inPayload = &stats.InPayload{
+			Client: true,
+		}
+	}
+	if cs.c.maxReceiveMessageSize == nil {
+		return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+	}
+	err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload)
+	defer func() {
+		// err != nil indicates the termination of the stream.
+		if err != nil {
+			cs.finish(err)
+		}
+	}()
+	if err == nil {
+		if cs.tracing {
+			cs.mu.Lock()
+			if cs.trInfo.tr != nil {
+				cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+			}
+			cs.mu.Unlock()
+		}
+		if inPayload != nil {
+			cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
+		}
+		if !cs.desc.ClientStreams || cs.desc.ServerStreams {
+			return
+		}
+		// Special handling for client streaming rpc.
+		// This recv expects EOF or errors, so we don't collect inPayload.
+		if cs.c.maxReceiveMessageSize == nil {
+			return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+		}
+		err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil)
+		cs.closeTransportStream(err)
+		if err == nil {
+			return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+		}
+		if err == io.EOF {
+			if se := cs.s.Status().Err(); se != nil {
+				return se
+			}
+			cs.finish(err)
+			return nil
+		}
+		return toRPCErr(err)
+	}
+	if _, ok := err.(transport.ConnectionError); !ok {
+		cs.closeTransportStream(err)
+	}
+	if err == io.EOF {
+		if statusErr := cs.s.Status().Err(); statusErr != nil {
+			return statusErr
+		}
+		// Returns io.EOF to indicate the end of the stream.
+ return + } + return toRPCErr(err) +} + +func (cs *clientStream) CloseSend() (err error) { + err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() + if err == nil || err == io.EOF { + return nil + } + if _, ok := err.(transport.ConnectionError); !ok { + cs.closeTransportStream(err) + } + err = toRPCErr(err) + return +} + +func (cs *clientStream) closeTransportStream(err error) { + cs.mu.Lock() + if cs.closed { + cs.mu.Unlock() + return + } + cs.closed = true + cs.mu.Unlock() + cs.t.CloseStream(cs.s, err) +} + +func (cs *clientStream) finish(err error) { + cs.mu.Lock() + defer cs.mu.Unlock() + if cs.finished { + return + } + cs.finished = true + defer func() { + if cs.cancel != nil { + cs.cancel() + } + }() + for _, o := range cs.opts { + o.after(cs.c) + } + if cs.done != nil { + updateRPCInfoInContext(cs.s.Context(), rpcInfo{ + bytesSent: cs.s.BytesSent(), + bytesReceived: cs.s.BytesReceived(), + }) + cs.done(balancer.DoneInfo{Err: err}) + cs.done = nil + } + if cs.statsHandler != nil { + end := &stats.End{ + Client: true, + EndTime: time.Now(), + } + if err != io.EOF { + // end.Error is nil if the RPC finished successfully. + end.Error = toRPCErr(err) + } + cs.statsHandler.HandleRPC(cs.statsCtx, end) + } + if !cs.tracing { + return + } + if cs.trInfo.tr != nil { + if err == nil || err == io.EOF { + cs.trInfo.tr.LazyPrintf("RPC: [OK]") + } else { + cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) + cs.trInfo.tr.SetError() + } + cs.trInfo.tr.Finish() + cs.trInfo.tr = nil + } +} + +// ServerStream defines the interface a server stream has to satisfy. +type ServerStream interface { + // SetHeader sets the header metadata. It may be called multiple times. + // When call multiple times, all the provided metadata will be merged. + // All the metadata will be sent out when one of the following happens: + // - ServerStream.SendHeader() is called; + // - The first response is sent out; + // - An RPC status is sent out (error or success). + SetHeader(metadata.MD) error + // SendHeader sends the header metadata. + // The provided md and headers set by SetHeader() will be sent. + // It fails if called multiple times. + SendHeader(metadata.MD) error + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. + SetTrailer(metadata.MD) + Stream +} + +// serverStream implements a server side Stream. +type serverStream struct { + t transport.ServerTransport + s *transport.Stream + p *parser + codec Codec + cp Compressor + dc Decompressor + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler stats.Handler + + mu sync.Mutex // protects trInfo.tr after the service handler runs. 
+} + +func (ss *serverStream) Context() context.Context { + return ss.s.Context() +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) + return +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + }() + var outPayload *stats.OutPayload + if ss.statsHandler != nil { + outPayload = &stats.OutPayload{} + } + hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload) + if err != nil { + return err + } + if len(data) > ss.maxSendMessageSize { + return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if outPayload != nil { + outPayload.SentTime = time.Now() + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + }() + var inPayload *stats.InPayload + if ss.statsHandler != nil { + inPayload = &stats.InPayload{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload); err != nil { + if err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if inPayload != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), inPayload) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 00000000000..22b8fb50dea --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. 
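+//
+// For illustration only (this sketch is not part of the package, and the
+// limiter is assumed to come from golang.org/x/time/rate), a non-blocking
+// rate-limiting handle could look like:
+//
+//	limiter := rate.NewLimiter(100, 10) // ~100 RPCs/sec, bursts of 10
+//	inHandle := func(ctx context.Context, info *tap.Info) (context.Context, error) {
+//		if !limiter.Allow() { // Allow never blocks, as required of a handle
+//			return nil, fmt.Errorf("rate limit exceeded for %q", info.FullMethodName)
+//		}
+//		return ctx, nil
+//	}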
+package tap + +import ( + "golang.org/x/net/context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 00000000000..c1c96dedcb7 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. 
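+// Rendered via String() below, a client-side first line with a one-second
+// deadline reads like "RPC: to 127.0.0.1:50051 deadline:1s" (the address is
+// illustrative).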
+type firstLine struct {
+	client     bool // whether this is a client (outgoing) RPC
+	remoteAddr net.Addr
+	deadline   time.Duration // may be zero
+}
+
+func (f *firstLine) String() string {
+	var line bytes.Buffer
+	io.WriteString(&line, "RPC: ")
+	if f.client {
+		io.WriteString(&line, "to")
+	} else {
+		io.WriteString(&line, "from")
+	}
+	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+	if f.deadline != 0 {
+		fmt.Fprint(&line, f.deadline)
+	} else {
+		io.WriteString(&line, "none")
+	}
+	return line.String()
+}
+
+const truncateSize = 100
+
+func truncate(x string, l int) string {
+	if l > len(x) {
+		return x
+	}
+	return x[:l]
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+	sent bool        // whether this is an outgoing payload
+	msg  interface{} // e.g. a proto.Message
+	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+	if p.sent {
+		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
+	}
+	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
+}
+
+type fmtStringer struct {
+	format string
+	a      []interface{}
+}
+
+func (f *fmtStringer) String() string {
+	return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go
new file mode 100644
index 00000000000..63cd2627c87
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go
@@ -0,0 +1,140 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows
+	// will be increased to.
+	bdpLimit = (1 << 20) * 4
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// beta * our estimated bdp, and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp no larger than twice the real BDP, we would
+	// multiply the current sample by 4/3; to round things out,
+	// we use 2 as the multiplication factor instead.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// isSent is true while a bdp ping is in flight, i.e. while a new
+	// measurement cycle is under way.
+ isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go new file mode 100644 index 00000000000..dd1a8d42e7e --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+)
+
+const (
+	// The default value of flow control window size in HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = time.Duration(2 * time.Hour)
+	defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
+	defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
+	// max window limit set by HTTP2 Specs.
+	maxWindowSize = math.MaxInt32
+	// defaultLocalSendQuota is the default value for the number of data
+	// bytes that each stream can schedule before some of it is
+	// flushed out.
+	defaultLocalSendQuota = 64 * 1024
+)
+
+// The following defines various control items which could flow through
+// the control buffer of transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, streaming resetting, etc.
+
+type headerFrame struct {
+	streamID  uint32
+	hf        []hpack.HeaderField
+	endStream bool
+}
+
+func (*headerFrame) item() {}
+
+type continuationFrame struct {
+	streamID            uint32
+	endHeaders          bool
+	headerBlockFragment []byte
+}
+
+type dataFrame struct {
+	streamID  uint32
+	endStream bool
+	d         []byte
+	f         func()
+}
+
+func (*dataFrame) item() {}
+
+func (*continuationFrame) item() {}
+
+type windowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+func (*windowUpdate) item() {}
+
+type settings struct {
+	ack bool
+	ss  []http2.Setting
+}
+
+func (*settings) item() {}
+
+type resetStream struct {
+	streamID uint32
+	code     http2.ErrCode
+}
+
+func (*resetStream) item() {}
+
+type goAway struct {
+	code      http2.ErrCode
+	debugData []byte
+	headsUp   bool
+	closeConn bool
+}
+
+func (*goAway) item() {}
+
+type flushIO struct {
+}
+
+func (*flushIO) item() {}
+
+type ping struct {
+	ack  bool
+	data [8]byte
+}
+
+func (*ping) item() {}
+
+// quotaPool is a pool which accumulates the quota and sends it to acquire()
+// when it is available.
+type quotaPool struct {
+	c chan int
+
+	mu      sync.Mutex
+	version uint32
+	quota   int
+}
+
+// newQuotaPool creates a quotaPool which has quota q available to consume.
+func newQuotaPool(q int) *quotaPool {
+	qb := &quotaPool{
+		c: make(chan int, 1),
+	}
+	if q > 0 {
+		qb.c <- q
+	} else {
+		qb.quota = q
+	}
+	return qb
+}
+
+// add cancels any quota pending on the channel, increments it by v, and
+// sends the result back on the channel.
+func (qb *quotaPool) add(v int) {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	qb.lockedAdd(v)
+}
+
+func (qb *quotaPool) lockedAdd(v int) {
+	select {
+	case n := <-qb.c:
+		qb.quota += n
+	default:
+	}
+	qb.quota += v
+	if qb.quota <= 0 {
+		return
+	}
+	// After the pool has been created, this is the only place that sends on
+	// the channel. Since mu is held at this point and any quota that was sent
+	// on the channel has been retrieved, we know that this code will always
+	// place any positive quota value on the channel.
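+	// (For example: with 5 pending on the channel and v = 3, the receive
+	// above drains the 5, quota becomes 8, and the select below sends the
+	// full 8 to the next acquirer.)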
+	select {
+	case qb.c <- qb.quota:
+		qb.quota = 0
+	default:
+	}
+}
+
+func (qb *quotaPool) addAndUpdate(v int) {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	qb.lockedAdd(v)
+	// Update the version only after having added to the quota
+	// so that if acquireWithVersion sees the new version it is
+	// guaranteed to have seen the updated quota.
+	// Also, still keep this inside of the lock, so that when
+	// compareAndExecute is processing, this function doesn't
+	// get executed partially (quota gets updated but the version
+	// doesn't).
+	atomic.AddUint32(&(qb.version), 1)
+}
+
+func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
+	return qb.c, atomic.LoadUint32(&(qb.version))
+}
+
+func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
+	qb.mu.Lock()
+	defer qb.mu.Unlock()
+	if version == atomic.LoadUint32(&(qb.version)) {
+		success()
+		return true
+	}
+	failure()
+	return false
+}
+
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+	return qb.c
+}
+
+// inFlow deals with inbound flow control
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which have been received but not yet
+	// consumed by applications.
+	pendingData uint32
+	// The amount of data the application has consumed but grpc has not sent
+	// window update for them. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by the receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	d := n - f.limit
+	f.limit = n
+	return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; we will fall back on the current available window (at least a 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
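+// It returns an error if the peer has sent more data than the advertised
+// window (the limit plus any extra delta granted by maybeAdjust) allows.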
+func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + defer f.mu.Unlock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) + } + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + defer f.mu.Unlock() + if f.pendingData == 0 { + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + return wu + } + return 0 +} + +func (f *inFlow) resetPendingUpdate() uint32 { + f.mu.Lock() + defer f.mu.Unlock() + n := f.pendingUpdate + f.pendingUpdate = 0 + return n +} diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 00000000000..7cffee11e0b --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,45 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "net" + + "google.golang.org/grpc/codes" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 00000000000..2464e69faf5 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,46 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "context" + "net" + + "google.golang.org/grpc/codes" + + netctx "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, netctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 00000000000..f1f6caf89c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,414 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. 
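+//
+// This constructor is normally reached through (*grpc.Server).ServeHTTP rather
+// than called directly. A minimal sketch (illustrative only; srv is an assumed
+// *grpc.Server, and TLS setup is elided):
+//
+//	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+//		if r.ProtoMajor == 2 && strings.HasPrefix(
+//			r.Header.Get("Content-Type"), "application/grpc") {
+//			srv.ServeHTTP(w, r) // ends up constructing a serverHandlerTransport
+//			return
+//		}
+//		// ... serve ordinary HTTP here ...
+//	})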
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + if !validContentType(r.Header.Get("Content-Type")) { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + var metakv []string + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + mu sync.Mutex + // streamDone indicates whether WriteStatus has been called and writes channel + // has been closed. + streamDone bool +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. 
Imperfect but maybe good enough. + select { + case <-ht.closedCh: + return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.mu.Lock() + if ht.streamDone { + ht.mu.Unlock() + return nil + } + ht.streamDone = true + ht.mu.Unlock() + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + close(ht.writes) + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", "application/grpc") + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. 
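+	// Its lifetime is bounded by the request: the context created below is
+	// cancelled if the client goes away or the transport closes before the
+	// status has been written via WriteStatus.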
+ + var ctx context.Context + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, recv: s.buf}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go new file mode 100644 index 00000000000..f665ef047be --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -0,0 +1,1322 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "io" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + ctx context.Context + cancel context.CancelFunc + userAgent string + md interface{} + conn net.Conn // underlying communication channel + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + nextID uint32 // the next stream ID to be used + + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + // awakenKeepalive is used to wake up keepalive when after it has gone dormant. + awakenKeepalive chan struct{} + + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + // streamsQuota limits the max number of concurrent streams. + streamsQuota *quotaPool + + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + creds []credentials.PerRPCCredentials + + // Boolean to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + kp keepalive.ClientParameters + + statsHandler stats.Handler + + initialWindowSize int32 + + bdpEst *bdpEstimator + outQuotaVersion uint32 + + mu sync.Mutex // guard the following variables + state transportState // the state of underlying connection + activeStreams map[uint32]*Stream + // The max number of concurrent streams + maxStreams int + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return dialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err { + case io.EOF: + // Connection closures may be resolved upon retry, and are thus + // treated as temporary. 
+ return true + case context.DeadlineExceeded: + // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this + // special case is not needed. Until then, we need to keep this + // clause. + return true + } + + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return false +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + connectCtx, connectCancel := context.WithTimeout(ctx, timeout) + defer func() { + if err != nil { + cancel() + // Don't call connectCancel in success path due to a race in Go 1.6: + // https://github.com/golang/go/issues/15078. + connectCancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + if creds := opts.TransportCredentials; creds != nil { + scheme = "https" + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + // Credentials handshake errors are typically considered permanent + // to avoid retrying on e.g. bad certificates. + temp := isTemporary(err) + return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + var buf bytes.Buffer + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } + t := &http2Client{ + ctx: ctx, + cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + // The client initiated stream id is odd starting from 1. 
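+		// (Server-initiated stream ids are even; id 0 is reserved for the
+		// connection itself.)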
+ nextID: 1, + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + framer: newFramer(conn, writeBufSize, readBufSize), + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + scheme: scheme, + state: reachable, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + maxStreams: defaultMaxStreamsClient, + streamsQuota: newQuotaPool(defaultMaxStreamsClient), + streamSendQuota: defaultWindowSize, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + } + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. + t.awakenKeepalive <- struct{}{} + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if t.initialWindowSize != defaultWindowSize { + err = t.framer.fr.WriteSettings(http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } else { + err = t.framer.fr.WriteSettings() + } + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() + if t.kp.Time != infinity { + go t.keepalive() + } + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), + localSendQuota: newQuotaPool(defaultLocalSendQuota), + headerChan: make(chan struct{}), + } + t.nextID += 2 + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. 
+ // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + + return s +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + ctx = peer.NewContext(ctx, pr) + var ( + authData = make(map[string]string) + audience string + ) + // Create an audience string only if needed. + if len(t.creds) > 0 || callHdr.Creds != nil { + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + audience = "https://" + host + callHdr.Method[:pos] + } + for _, c := range t.creds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return nil, ErrConnClosing + } + if t.state == draining { + t.mu.Unlock() + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + t.mu.Unlock() + sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) + if err != nil { + return nil, err + } + // Returns the quota balance back. + if sq > 1 { + t.streamsQuota.add(sq - 1) + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := dl.Sub(time.Now()) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + if md, ok := metadata.FromOutgoingContext(ctx); ok { + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + t.streamsQuota.add(1) + return nil, ErrStreamDrain + } + if t.state != reachable { + t.mu.Unlock() + return nil, ErrConnClosing + } + s := t.newStream(ctx, callHdr) + t.activeStreams[s.id] = s + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + t.controlBuf.put(&ping{data: [8]byte{}}) + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. 
+ t.awakenKeepalive <- struct{}{} + default: + } + } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) + t.mu.Unlock() + + s.mu.Lock() + s.bytesSent = true + s.mu.Unlock() + + if t.statsHandler != nil { + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return + } + if err != nil { + // notify in-flight streams, before the deletion + s.write(recvMsg{err: err}) + } + delete(t.activeStreams, s.id) + if t.state == draining && len(t.activeStreams) == 0 { + // The transport is draining and s is the last live stream on t. + t.mu.Unlock() + t.Close() + return + } + t.mu.Unlock() + // rstStream is true in case the stream is being closed at the client-side + // and the server needs to be intimated about it by sending a RST_STREAM + // frame. + // To make sure this frame is written to the wire before the headers of the + // next stream waiting for streamsQuota, we add to streamsQuota pool only + // after having acquired the writableChan to send RST_STREAM out (look at + // the controller() routine). + var rstStream bool + var rstError http2.ErrCode + defer func() { + // In case, the client doesn't have to send RST_STREAM to server + // we can safely add back to streamsQuota pool now. + if !rstStream { + t.streamsQuota.add(1) + return + } + t.controlBuf.put(&resetStream{s.id, rstError}) + }() + s.mu.Lock() + rstStream = s.rstStream + rstError = s.rstError + if s.state == streamDone { + s.mu.Unlock() + return + } + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.state = streamDone + s.mu.Unlock() + if _, ok := err.(StreamError); ok { + rstStream = true + rstError = http2.ErrCodeCancel + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close() (err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + t.state = closing + t.mu.Unlock() + t.cancel() + err = t.conn.Close() + t.mu.Lock() + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + // Notify all active streams. + for _, s := range streams { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: ErrConnClosing}) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. 
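+//
+// A minimal usage sketch (assumed caller, not part of this change):
+//
+//	_ = t.GracefulClose() // drain; any error comes from an immediate Close()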
+func (t *http2Client) GracefulClose() error { + t.mu.Lock() + switch t.state { + case closing, draining: + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + return nil +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + if hdr == nil && data == nil && opts.Last { + // stream.CloseSend uses this to send an empty frame with endStream=True + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}}) + return nil + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for idx, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. + var endStream bool + // See if this is the last frame to be written. + if opts.Last { + if len(r)-size == 0 { // No more data in r after this iteration. + if idx == 0 { // We're writing data header. + if len(data) == 0 { // There's no data to follow. + endStream = true + } + } else { // We're writing data. + endStream = true + } + } + } + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) + if ps < sq { + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) + } + } + } + if !opts.Last { + return nil + } + s.mu.Lock() + if s.state != streamDone { + s.state = streamWriteDone + } + s.mu.Unlock() + return nil +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. 
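+//
+// Worked example (illustrative numbers): with a 64KB initial stream window,
+// a pending application read of 1MB makes fc.maybeAdjust report extra window
+// so the sender isn't limited to 64KB per round trip; any pending
+// connection-level update is flushed alongside it.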
+func (t *http2Client) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + // Piggyback connection's window update along. + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could've been an empty data frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.rstStream = true + s.rstError = http2.ErrCodeFlowControl + s.finish(status.New(codes.Internal, err.Error())) + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) + } + } + s.mu.Unlock() + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + // The server has closed the stream without sending trailers. 
Record that
+ // the read direction is closed, and set the status appropriately.
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ statusCode, ok := http2ErrConvTab[http2.ErrCode(f.ErrCode)]
+ if !ok {
+ warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ statusCode = codes.Unknown
+ }
+ s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode))
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ // The settings will be applied once the ack is sent.
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+ if f.IsAck() {
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ t.mu.Lock()
+ if t.state != reachable && t.state != draining {
+ t.mu.Unlock()
+ return
+ }
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+ infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ }
+ id := f.LastStreamID
+ if id > 0 && id%2 != 1 {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ // A client can receive multiple GoAways from the server (see https://github.com/grpc/grpc-go/issues/1387).
+ // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay
+ // with the ID of the last stream the server will process.
+ // Therefore, when we get the first GoAway we don't really close any streams. In case of the second GoAway we
+ // close all streams created after its GoAway ID. This way streams that were in-flight while the GoAway from the server
+ // was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ t.state = draining
+ }
+ // All streams with IDs greater than the GoAwayId
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ close(stream.goAway)
+ }
+ }
+ t.prevGoAwayID = id
+ active := len(t.activeStreams)
+ t.mu.Unlock()
+ if active == 0 {
+ t.Close()
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on the transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = NoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = TooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + id := f.Header().StreamID + incr := f.Increment + if id == 0 { + t.sendQuotaPool.add(int(incr)) + return + } + if s, ok := t.getStream(f); ok { + s.sendQuotaPool.add(int(incr)) + } +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return + } + s.mu.Lock() + s.bytesReceived = true + s.mu.Unlock() + var state decodeState + if err := state.decodeResponseHeader(frame); err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) + // Something wrong. Stops reading even when there is remaining. + return + } + + endStream := frame.StreamEnded() + var isHeader bool + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + s.mu.Lock() + if !endStream { + s.recvCompress = state.encoding + } + if !s.headerDone { + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata + } + close(s.headerChan) + s.headerDone = true + isHeader = true + } + if !endStream || s.state == streamDone { + s.mu.Unlock() + return + } + + if len(state.mdata) > 0 { + s.trailer = state.mdata + } + s.finish(state.status()) + s.mu.Unlock() + s.write(recvMsg{err: io.EOF}) +} + +func handleMalformedHTTP2(s *Stream, err error) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() + return + } + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() + return + } + t.handleSettings(sf) + + // loop to keep reading incoming messages on this transport. + for { + frame, err := t.framer.fr.ReadFrame() + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail())) + } + continue + } else { + // Transport error. 
+ t.Close()
+ return
+ }
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ t.operateHeaders(frame)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func (t *http2Client) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ // TODO(zhaoq): This is a hack to avoid significant refactoring of the
+ // code to deal with the unrealistic int32 overflow. Probably will try
+ // to find a better way to handle this later.
+ if s.Val > math.MaxInt32 {
+ s.Val = math.MaxInt32
+ }
+ t.mu.Lock()
+ ms := t.maxStreams
+ t.maxStreams = int(s.Val)
+ t.mu.Unlock()
+ t.streamsQuota.add(int(s.Val) - ms)
+ case http2.SettingInitialWindowSize:
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ // Adjust the sending quota for each stream.
+ stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+ }
+}
+
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
+func (t *http2Client) itemHandler(i item) error {
+ var err error
+ switch i := i.(type) {
+ case *dataFrame:
+ err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
+ if err == nil {
+ i.f()
+ }
+ case *headerFrame:
+ t.hBuf.Reset()
+ for _, f := range i.hf {
+ t.hEnc.WriteField(f)
+ }
+ endHeaders := false
+ first := true
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ first = false
+ err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: i.streamID,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: i.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = t.framer.fr.WriteContinuation(
+ i.streamID,
+ endHeaders,
+ t.hBuf.Next(size),
+ )
+ }
+ if err != nil {
+ return err
+ }
+ }
+ case *windowUpdate:
+ err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.applySettings(i.ss)
+ err = t.framer.fr.WriteSettingsAck()
+ } else {
+ err = t.framer.fr.WriteSettings(i.ss...)
+ }
+ case *resetStream:
+ // If the server needs to be informed about the stream closing,
+ // then we need to make sure the RST_STREAM frame is written to
+ // the wire before the headers of the next stream waiting on
+ // streamQuota. We ensure this by adding to the streamsQuota pool
+ // only after having acquired the writableChan to send RST_STREAM.
+ err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
+ t.streamsQuota.add(1)
+ case *flushIO:
+ err = t.framer.writer.Flush()
+ case *ping:
+ if !i.ack {
+ t.bdpEst.timesnap(i.data)
+ }
+ err = t.framer.fr.WritePing(i.ack, i.data)
+ default:
+ errorf("transport: http2Client.controller got unexpected item type %v", i)
+ }
+ return err
+}
+
+// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
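+//
+// Cycle sketch with illustrative parameters (Time=2h, Timeout=20s): if no
+// read activity was seen in the last 2h, a PING is queued and the timer is
+// re-armed for 20s; if that also expires with no activity, the transport is
+// closed.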
+func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + // Check if keepalive should go dormant. + t.mu.Lock() + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // Make awakenKeepalive writable. + <-t.awakenKeepalive + t.mu.Unlock() + select { + case <-t.awakenKeepalive: + // If the control gets here a ping has been sent + // need to reset the timer with keepalive.Timeout. + case <-t.ctx.Done(): + return + } + } else { + t.mu.Unlock() + // Send ping. + t.controlBuf.put(p) + } + + // By the time control gets here a ping has been sent one way or the other. + timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + t.Close() + return + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go new file mode 100644 index 00000000000..f84d70a1796 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -0,0 +1,1188 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. +var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + ctx context.Context + cancel context.CancelFunc + conn net.Conn + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. 
+ controlBuf *controlBuffer + fc *inFlow + // sendQuotaPool provides flow control to outbound message. + sendQuotaPool *quotaPool + stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // the per-stream outbound flow control window size set by the peer. + streamSendQuota uint32 + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) + // Send initial settings as connection preface to client. + var isettings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
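+ // Illustrative example (assumed config value): with
+ // InitialConnWindowSize = 1MB, the delta below is 1MB minus the 64KB
+ // default window, announced to the peer as a WINDOW_UPDATE on stream 0.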
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + var buf bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) + t := &http2Server{ + ctx: ctx, + cancel: cancel, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + controlBuf: newControlBuffer(), + fc: &inFlow{limit: uint32(icwz)}, + sendQuotaPool: newQuotaPool(defaultWindowSize), + state: reachable, + activeStreams: make(map[uint32]*Stream), + streamSendQuota: defaultWindowSize, + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + t.framer.writer.Flush() + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { + streamID := frame.Header().StreamID + + var state decodeState + for _, hf := range frame.Fields { + if err := state.processHeaderField(hf); err != nil { + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]}) + } + return + } + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Cache the current stream to the context so that the server application + // can find out. Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) + // Attach the received metadata to the context. 
+ if len(state.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) + } + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) + return + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream}) + return + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + return true + } + t.maxStreamID = streamID + s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) + s.localSendQuota = newQuotaPool(defaultLocalSendQuota) + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + handle(s) + return +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + // Check the validity of client preface. 
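+ // The expected preface is the fixed 24-byte sequence required by RFC 7540
+ // and exposed as http2.ClientPreface: "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n".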
+ preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // Only log if it isn't a simple tcp accept check (ie: tcp balancer doing open/close socket) + if err != io.EOF { + errorf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + t.Close() + return + } + if !bytes.Equal(preface, clientPreface) { + errorf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + t.Close() + return + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + if err != nil { + errorf("transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + t.Close() + return + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + errorf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + t.Close() + return + } + t.handleSettings(sf) + + for { + frame, err := t.framer.fr.ReadFrame() + atomic.StoreUint32(&t.activity, 1) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s) + } + t.controlBuf.put(&resetStream{se.StreamID, se.Code}) + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.maybeAdjust(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. 
+func (t *http2Server) updateWindow(s *Stream, n uint32) { + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := s.fc.onRead(n); w > 0 { + if cw := t.fc.resetPendingUpdate(); cw > 0 { + t.controlBuf.put(&windowUpdate{0, cw}) + } + t.controlBuf.put(&windowUpdate{s.id, w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)}) + t.controlBuf.put(&settings{ + ack: false, + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: uint32(n), + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(uint32(size)) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + // Furthermore, if a bdpPing is being sent out we can piggyback + // connection's window update for the bytes we just received. + if sendBDPPing { + if size != 0 { // Could be an empty frame. + t.controlBuf.put(&windowUpdate{0, uint32(size)}) + } + t.controlBuf.put(bdpPing) + } else { + if err := t.fc.onData(uint32(size)); err != nil { + errorf("transport: http2Server %v", err) + t.Close() + return + } + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.mu.Unlock() + t.closeStream(s) + t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) + } + } + s.mu.Unlock() + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. 
+ s.mu.Lock()
+ if s.state != streamDone {
+ s.state = streamReadDone
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ t.closeStream(s)
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ if s.ID == http2.SettingInitialWindowSize {
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+ }
+}
+
+const (
+ maxPingStrikes = 2
+ defaultPingTimeout = 2 * time.Hour
+)
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+ if f.IsAck() {
+ if f.Data == goAwayPing.data && t.drainChan != nil {
+ close(t.drainChan)
+ return
+ }
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+
+ now := time.Now()
+ defer func() {
+ t.lastPingAt = now
+ }()
+ // A reset of the ping strikes means that we don't need to check for a
+ // policy violation for this ping and the pingStrikes counter should be
+ // set to 0.
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+ t.pingStrikes = 0
+ return
+ }
+ t.mu.Lock()
+ ns := len(t.activeStreams)
+ t.mu.Unlock()
+ if ns < 1 && !t.kep.PermitWithoutStream {
+ // Keepalive shouldn't be active; thus, this new ping should
+ // have come after at least defaultPingTimeout.
+ if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+ t.pingStrikes++
+ }
+ } else {
+ // Check if keepalive policy is respected.
+ if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+ t.pingStrikes++
+ }
+ }
+
+ if t.pingStrikes > maxPingStrikes {
+ // Send goaway and close the connection.
+ errorf("transport: Got too many pings from the client, closing the connection.")
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+ }
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
+ return ErrIllegalHeaderWrite
+ }
+ s.headerOk = true
+ if md.Len() > 0 {
+ if s.header.Len() > 0 {
+ s.header = metadata.Join(s.header, md)
+ } else {
+ s.header = md
+ }
+ }
+ md = s.header
+ s.mu.Unlock()
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
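+ // Illustrative sketch: for a response with no custom metadata and no
+ // compression, the header block queued below is just
+ //
+ //   :status: 200
+ //   content-type: application/grpc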
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ if s.sendCompress != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ for k, vv := range md {
+ if isReservedHeader(k) {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ t.controlBuf.put(&headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: false,
+ })
+ if t.stats != nil {
+ outHeader := &stats.OutHeader{
+ //WireLength: // TODO(mmukhi): Revisit this later, if needed.
+ }
+ t.stats.HandleRPC(s.Context(), outHeader)
+ }
+ return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+ select {
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
+ var headersSent, hasHeader bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return nil
+ }
+ if s.headerOk {
+ headersSent = true
+ }
+ if s.header.Len() > 0 {
+ hasHeader = true
+ }
+ s.mu.Unlock()
+
+ if !headersSent && hasHeader {
+ t.WriteHeader(s, nil)
+ headersSent = true
+ }
+
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+ if !headersSent {
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ }
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+ if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ stBytes, err := proto.Marshal(p)
+ if err != nil {
+ // TODO: return error instead, when callers are able to handle it.
+ panic(err)
+ }
+
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ }
+
+ // Attach the trailer metadata.
+ for k, vv := range s.trailer {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ t.controlBuf.put(&headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: true,
+ })
+ if t.stats != nil {
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+ }
+ t.closeStream(s)
+ return nil
+}
+
+// Write converts the data into HTTP2 data frame(s) and sends it out. A non-nil error
+// is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + + var writeHeaderFrame bool + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return streamErrorf(codes.Unknown, "the stream has been done") + } + if !s.headerOk { + writeHeaderFrame = true + } + s.mu.Unlock() + if writeHeaderFrame { + t.WriteHeader(s, nil) + } + // Add data to header frame so that we can equally distribute data across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + for _, r := range [][]byte{hdr, data} { + for len(r) > 0 { + size := http2MaxFrameLen + // Wait until the stream has some quota to send the data. + quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() + sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan) + if err != nil { + return err + } + // Wait until the transport has some quota to send the data. + tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire()) + if err != nil { + return err + } + if sq < size { + size = sq + } + if tq < size { + size = tq + } + if size > len(r) { + size = len(r) + } + p := r[:size] + ps := len(p) + if ps < tq { + // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - ps) + } + // Acquire local send quota to be able to write to the controlBuf. + ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire()) + if err != nil { + if _, ok := err.(ConnectionError); !ok { + t.sendQuotaPool.add(ps) + } + return err + } + s.localSendQuota.add(ltq - ps) // It's ok we make this negative. + // Reset ping strikes when sending data since this might cause + // the peer to send ping. + atomic.StoreUint32(&t.resetPingStrikes, 1) + success := func() { + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() { + s.localSendQuota.add(ps) + }}) + if ps < sq { + // Overbooked stream quota. Return it back. + s.sendQuotaPool.lockedAdd(sq - ps) + } + r = r[ps:] + } + failure := func() { + s.sendQuotaPool.lockedAdd(sq) + } + if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { + t.sendQuotaPool.add(ps) + s.localSendQuota.add(ps) + } + } + } + return nil +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + var pingSent bool + maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) + maxAge := time.NewTimer(t.kp.MaxConnectionAge) + keepalive := time.NewTimer(t.kp.Time) + // NOTE: All exit paths of this function should reset their + // respective timers. A failure to do so will cause the + // following clean-up to deadlock and eventually leak. 
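+ // This relies on the standard time.Timer contract: if Stop returns false
+ // the timer has already fired, and its value must be drained from the
+ // channel before the timer can be safely discarded.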
+ defer func() {
+ if !maxIdle.Stop() {
+ <-maxIdle.C
+ }
+ if !maxAge.Stop() {
+ <-maxAge.C
+ }
+ if !keepalive.Stop() {
+ <-keepalive.C
+ }
+ }()
+ for {
+ select {
+ case <-maxIdle.C:
+ t.mu.Lock()
+ idle := t.idle
+ if idle.IsZero() { // The connection is non-idle.
+ t.mu.Unlock()
+ maxIdle.Reset(t.kp.MaxConnectionIdle)
+ continue
+ }
+ val := t.kp.MaxConnectionIdle - time.Since(idle)
+ t.mu.Unlock()
+ if val <= 0 {
+ // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+ // Gracefully close the connection.
+ t.drain(http2.ErrCodeNo, []byte{})
+ // Resetting the timer so that the clean-up doesn't deadlock.
+ maxIdle.Reset(infinity)
+ return
+ }
+ maxIdle.Reset(val)
+ case <-maxAge.C:
+ t.drain(http2.ErrCodeNo, []byte{})
+ maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+ select {
+ case <-maxAge.C:
+ // Close the connection after grace period.
+ t.Close()
+ // Resetting the timer so that the clean-up doesn't deadlock.
+ maxAge.Reset(infinity)
+ case <-t.ctx.Done():
+ }
+ return
+ case <-keepalive.C:
+ if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
+ pingSent = false
+ keepalive.Reset(t.kp.Time)
+ continue
+ }
+ if pingSent {
+ t.Close()
+ // Resetting the timer so that the clean-up doesn't deadlock.
+ keepalive.Reset(infinity)
+ return
+ }
+ pingSent = true
+ t.controlBuf.put(p)
+ keepalive.Reset(t.kp.Timeout)
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
+func (t *http2Server) itemHandler(i item) error {
+ switch i := i.(type) {
+ case *dataFrame:
+ if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+ return err
+ }
+ i.f()
+ return nil
+ case *headerFrame:
+ t.hBuf.Reset()
+ for _, f := range i.hf {
+ t.hEnc.WriteField(f)
+ }
+ first := true
+ endHeaders := false
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ var err error
+ if first {
+ first = false
+ err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: i.streamID,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: i.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = t.framer.fr.WriteContinuation(
+ i.streamID,
+ endHeaders,
+ t.hBuf.Next(size),
+ )
+ }
+ if err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+ return nil
+ case *windowUpdate:
+ return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.applySettings(i.ss)
+ return t.framer.fr.WriteSettingsAck()
+ }
+ return t.framer.fr.WriteSettings(i.ss...)
+ case *resetStream:
+ return t.framer.fr.WriteRSTStream(i.streamID, i.code)
+ case *goAway:
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ // The transport is closing.
+ return fmt.Errorf("transport: Connection closing")
+ }
+ sid := t.maxStreamID
+ if !i.headsUp {
+ // Stop accepting more streams now.
+ t.state = draining
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
+ return err
+ }
+ if i.closeConn {
+ // Abruptly close the connection following the GoAway (via
+ // loopyWriter). But flush out what's inside the buffer first.
+ t.framer.writer.Flush() + return fmt.Errorf("transport: Connection closing") + } + return nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): + return + } + t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) + }() + return nil + case *flushIO: + return t.framer.writer.Flush() + case *ping: + if !i.ack { + t.bdpEst.timesnap(i.data) + } + return t.framer.fr.WritePing(i.ack, i.data) + default: + err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) + errorf("%v", err) + return err + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.cancel() + err := t.conn.Close() + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream) { + t.mu.Lock() + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + if t.state == draining && len(t.activeStreams) == 0 { + defer t.Close() + } + t.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return + } + s.state = streamDone + s.mu.Unlock() +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. 
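+ // e.g. (illustrative): for v = 30*time.Minute, r is 3 minutes and j is
+ // drawn uniformly from [-3m, 3m).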
+ r := int64(v / 10) + j := rgen.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go new file mode 100644 index 00000000000..39f878cfd5b --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -0,0 +1,489 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +// Records the states during HPACK decoding. 
Must be reset once the +// decoding of the entire headers is finished. +type decodeState struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string + statsTags []byte + statsTrace []byte +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + "te": + return true + default: + return false + } +} + +// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders +// that should be propagated into metadata visible to users. +func isWhitelistedPseudoHeader(hdr string) bool { + switch hdr { + case ":authority": + return true + default: + return false + } +} + +func validContentType(t string) bool { + e := "application/grpc" + if !strings.HasPrefix(t, e) { + return false + } + // Support variations on the content-type + // (e.g. "application/grpc+blah", "application/grpc;blah"). + if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { + return false + } + return true +} + +func (d *decodeState) status() *status.Status { + if d.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) + } + return d.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header.
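+ // (For example, a proxy answering with a bare 503 and no grpc-status is + // mapped below through httpStatusConvTab to codes.Unavailable.)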
+ if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status (gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. That way, if the + // stream has ended, this Unknown status will be propagated to the user. + // Otherwise it will be ignored; in that case, the status from a later + // trailer, that has the StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { + switch f.Name { + case "content-type": + if !validContentType(f.Value) { + return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value) + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) + } + d.rawStatusCode = &code + case "grpc-message": + d.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + d.statusGen = status.FromProto(s) + case "grpc-timeout": + d.timeoutSet = true + var err error + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) + } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, string(v)) + } + return nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return
time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and rounds up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): This is simplistic and not bandwidth-efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildaByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode the status message in the header field +// "grpc-message". +// It checks whether each individual byte in msg is an allowable byte, and +// then either percent-encodes it or passes it through. +// When percent encoding, the byte is converted into hexadecimal notation +// with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c < tildaByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c >= spaceByte && c < tildaByte && c != percentByte { + buf.WriteByte(c) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", c)) + } + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
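+// For example, "%25" decodes back to '%' and "%0A" back to a newline, while +// any byte that is not part of a valid percent escape is passed through as is.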
+func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type framer struct { + numWriters int32 + reader io.Reader + writer *bufio.Writer + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { + f := &framer{ + reader: bufio.NewReaderSize(conn, readBufferSize), + writer: bufio.NewWriterSize(conn, writeBufferSize), + } + f.fr = http2.NewFramer(f.writer, f.reader) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go new file mode 100644 index 00000000000..ac8e358c5c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go new file mode 100644 index 00000000000..e0651c4d2b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -0,0 +1,771 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements a message-oriented communication +// channel to complete various transactions (e.g., an RPC). +package transport // import "google.golang.org/grpc/transport" + +import ( + "fmt" + "io" + "net" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + data []byte + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. +// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put". +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + ctx context.Context + goAway chan struct{} + recv *recvBuffer + last []byte // Stores the remaining data from previous calls. + err error +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there is no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + if r.last != nil && len(r.last) > 0 { + // Read remaining data left in last call.
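+ // A short read leaves whatever copy does not consume in r.last for the + // next call.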
+ copied := copy(p, r.last) + r.last = r.last[copied:] + return copied, nil + } + select { + case <-r.ctx.Done(): + return 0, ContextErr(r.ctx.Err()) + case <-r.goAway: + return 0, ErrStreamDrain + case m := <-r.recv.get(): + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil + } +} + +// All items in and out of a controlBuffer should be the same type. +type item interface { + item() +} + +// controlBuffer is an unbounded channel of item. +type controlBuffer struct { + c chan item + mu sync.Mutex + backlog []item +} + +func newControlBuffer() *controlBuffer { + b := &controlBuffer{ + c: make(chan item, 1), + } + return b +} + +func (b *controlBuffer) put(r item) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *controlBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives an item in the buffer. +// +// Upon receipt of an item, the caller should call load to send another +// item onto the channel if there is any. +func (b *controlBuffer) get() <-chan item { + return b.c +} + +type streamState uint8 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + // nil for client side Stream. + st ServerTransport + // ctx is the associated context of the stream. + ctx context.Context + // cancel is always nil for client side Stream. + cancel context.CancelFunc + // done is closed when the final status arrives. + done chan struct{} + // goAway is closed when the server sent a GoAway signal before this stream was initiated. + goAway chan struct{} + // method records the associated RPC method of the stream. + method string + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + recvQuota uint32 + + // TODO: Remove this unused variable. + // The accumulated inbound quota pending for window update. + updateQuota uint32 + + // Callback to state the application's intention to read data. This + // is used to adjust flow control, if need be. + requestRead func(int) + + sendQuotaPool *quotaPool + localSendQuota *quotaPool + // Close headerChan to indicate the end of reception of header metadata. + headerChan chan struct{} + // header caches the received header metadata. + header metadata.MD + + // The key-value map of trailer metadata. + trailer metadata.MD + + mu sync.RWMutex // guard the following + // headerOk becomes true when the first header is about to be sent. + headerOk bool + state streamState + // true iff headerChan is closed. Used to avoid closing headerChan + // multiple times. + headerDone bool + // the status error received from the server. + status *status.Status + // rstStream indicates whether a RST_STREAM frame needs to be sent + // to the server to signify that this stream is closing. + rstStream bool + // rstError is the error that needs to be sent along with the RST_STREAM frame. + rstError http2.ErrCode + // bytesSent and bytesReceived indicate whether any bytes have been sent or + // received on this stream.
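+ // They are read via BytesSent and BytesReceived below, which take s.mu.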
+ bytesSent bool + bytesReceived bool +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is an empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm for the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// GoAway returns a channel which is closed when the server sent a GoAway signal +// before this stream was initiated. +func (s *Stream) GoAway() <-chan struct{} { + return s.goAway +} + +// Header acquires the key-value pairs of header metadata once it +// is available. It blocks until i) the metadata is ready or ii) there is no +// header metadata or iii) the stream is canceled/expired. +func (s *Stream) Header() (metadata.MD, error) { + var err error + select { + case <-s.ctx.Done(): + err = ContextErr(s.ctx.Err()) + case <-s.goAway: + err = ErrStreamDrain + case <-s.headerChan: + return s.header.Copy(), nil + } + // Even if the stream is closed, header is returned if available. + select { + case <-s.headerChan: + return s.header.Copy(), nil + default: + } + return nil, err +} + +// Trailer returns the cached trailer metadata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +func (s *Stream) Trailer() metadata.MD { + s.mu.RLock() + c := s.trailer.Copy() + s.mu.RUnlock() + return c +} + +// ServerTransport returns the underlying ServerTransport for the stream. +// The client side stream always returns nil. +func (s *Stream) ServerTransport() ServerTransport { + return s.st +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +func (s *Stream) Status() *status.Status { + return s.status +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +func (s *Stream) SetHeader(md metadata.MD) error { + s.mu.Lock() + if s.headerOk || s.state == streamDone { + s.mu.Unlock() + return ErrIllegalHeaderWrite + } + if md.Len() == 0 { + s.mu.Unlock() + return nil + } + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +// Read reads len(p) bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// transportReader reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke.
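+// It also records the first error it sees in er, which lets Stream.Read above +// fail fast on later calls without touching the transport again.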
+type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if err != nil { + t.er = err + return + } + t.windowHandler(n) + return +} + +// finish sets the stream's state and status, and closes the done channel. +// s.mu must be held by the caller. st must always be non-nil. +func (s *Stream) finish(st *status.Status) { + s.status = st + s.state = streamDone + close(s.done) +} + +// BytesSent indicates whether any bytes have been sent on this stream. +func (s *Stream) BytesSent() bool { + s.mu.Lock() + bs := s.bytesSent + s.mu.Unlock() + return bs +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + s.mu.Lock() + br := s.bytesReceived + s.mu.Unlock() + return br +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { + return fmt.Sprintf("<stream: %p, %v>", s, s.method) +} + +// The key to save transport.Stream in the context. +type streamKey struct{} + +// newContextWithStream creates a new context from ctx and attaches stream +// to it. +func newContextWithStream(ctx context.Context, stream *Stream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// StreamFromContext returns the stream saved in ctx. +func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { + s, ok = ctx.Value(streamKey{}).(*Stream) + return +} + +// state of transport +type transportState int + +const ( + reachable transportState = iota + closing + draining +) + +// ServerConfig consists of all the configurations to establish a server transport. +type ServerConfig struct { + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int +} + +// NewServerTransport creates a ServerTransport with conn, or returns a non-nil +// error if it fails. +func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { + return newHTTP2Server(conn, config) +} + +// ConnectOptions covers all relevant options for communicating with the server. +type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Authority is the :authority pseudo-header to use. This field has no effect if + // TransportCredentials is set. + Authority string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to set up a client connection. + TransportCredentials credentials.TransportCredentials + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream.
+ InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of the write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int +} + +// TargetInfo contains the information of the target such as network address and metadata. +type TargetInfo struct { + Addr string + Metadata interface{} + Authority string +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts, timeout) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // transport implementation may ignore the hint. + Delay bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // RecvCompress specifies the compression algorithm applied on + // inbound messages. + RecvCompress string + + // SendCompress specifies the compression algorithm applied on + // outbound messages. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. + // If it's true, the transport may modify the flush decision + // for performance purposes. + // If it's false, the new stream will never be flushed. + Flush bool +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and waits for the completion of the pending RPCs. + GracefulClose() error + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case.
It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why the GoAway frame was received. + GetGoAwayReason() GoAwayReason +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client that this ServerTransport stops accepting new RPCs. + Drain() +} + +// streamErrorf creates a StreamError with the specified error code and description. +func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { + return StreamError{ + Code: c, + Desc: fmt.Sprintf(format, a...), + } +} + +// connectionErrorf creates a ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // ErrStreamDrain indicates that the stream is rejected by the server because + // the server stops accepting new RPCs. + ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs") +) + +// TODO: See if we can replace StreamError with status package errors. + +// StreamError is an error that only affects one stream within a connection.
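+// Unlike ConnectionError, it leaves the underlying connection usable for the +// other active streams.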
+type StreamError struct { + Code codes.Code + Desc string +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) +} + +// wait blocks until it can receive from one of the provided contexts or +// channels. ctx is the context of the RPC, tctx is the context of the +// transport, done is a channel closed to indicate the end of the RPC, goAway +// is a channel closed to indicate a GOAWAY was received, and proceed is a +// quota channel, whose received value is returned from this function if none +// of the other signals occur first. +func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { + select { + case <-ctx.Done(): + return 0, ContextErr(ctx.Err()) + case <-done: + return 0, io.EOF + case <-goAway: + return 0, ErrStreamDrain + case <-tctx.Done(): + return 0, ErrConnClosing + case i := <-proceed: + return i, nil + } +} + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // Invalid indicates that no GoAway frame is received. + Invalid GoAwayReason = 0 + // NoReason is the default value when GoAway frame is received. + NoReason GoAwayReason = 1 + // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm + // was received and that the debug data said "too_many_pings". + TooManyPings GoAwayReason = 2 +) + +// loopyWriter is run in a separate go routine. It is the single code path that will +// write data on wire. +func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + } + hasData: + for { + select { + case i := <-cbuf.get(): + cbuf.load() + if err := handler(i); err != nil { + return + } + case <-ctx.Done(): + return + default: + if err := handler(&flushIO{}); err != nil { + return + } + break hasData + } + } + } +} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100755 index 00000000000..72ef3290adc --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + +# Check proto in manual runs or cron runs. +if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi + +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... + go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/golang/protobuf/protoc-gen-go \ + golang.org/x/tools/cmd/stringer + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! 
which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|_string|\.pb)\.go:" || true) | tee /dev/stderr | (! read) + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +set -o pipefail +git reset --hard HEAD + +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ + (git status; git --no-pager diff; exit 1) +fi + +# TODO(menghanl): fix errors in transport_test. +staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... diff --git a/vendor/vendor.json b/vendor/vendor.json index bd44d8fea68..15cf238c942 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -2,12 +2,72 @@ "comment": "", "ignore": "appengine test github.com/hashicorp/nomad/ github.com/hashicorp/terraform/backend", "package": [ + { + "checksumSHA1": "AH7jcN7pvaPDU6UjHdpT081DDGk=", + "path": "cloud.google.com/go/compute/metadata", + "revision": "36dc44d65c859022eaa611ad10621be885d3643a", + "revisionTime": "2017-10-25T01:01:36Z" + }, + { + "checksumSHA1": "mUr+CfHry/vyDz3SeO2f7UXI/PI=", + "path": "github.com/Azure/azure-sdk-for-go/arm/storage", + "revision": "f7bb4db3ea4c73dc58bd284c38ea644a79324be0", + "revisionTime": "2017-10-23T18:42:54Z" + }, + { + "checksumSHA1": "KzijvZt+5kIVOsdv8GJkUK9njpc=", + "path": "github.com/Azure/azure-sdk-for-go/storage", + "revision": "f7bb4db3ea4c73dc58bd284c38ea644a79324be0", + "revisionTime": "2017-10-23T18:42:54Z" + }, + { + "checksumSHA1": "Xd5xJ9A8KynuExnf30qfukN3RR0=", + "path": "github.com/Azure/go-autorest/autorest", + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" + }, + { + "checksumSHA1": "Ktj3H1WpOqxnC9kdAA+F7Ol7/RQ=", + "path": "github.com/Azure/go-autorest/autorest/adal", + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" + }, + { + "checksumSHA1": "Pt0rzDhmiWpNeGvXT73kra89guI=", + "path": "github.com/Azure/go-autorest/autorest/azure", + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" + }, + { + "checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=", + "path": "github.com/Azure/go-autorest/autorest/date", + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" + }, + { + "checksumSHA1": "HfqZyKllcHQDvTwgCaYL1jUPmW0=", + "path": "github.com/Azure/go-autorest/autorest/validation", + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" + }, { "checksumSHA1": "PYNaEEt9v8iAvGVUD4do0YIeR1A=", "path": "github.com/Azure/go-ntlmssp", "revision": "c92175d540060095c69ced311f76aea56c83ecdb", "revisionTime": "2017-08-03T03:49:30Z" }, + { + 
"checksumSHA1": "IrtvVIFBTQmk0+vM7g2xtka5SFg=", + "path": "github.com/Unknwon/com", + "revision": "7677a1d7c1137cd3dd5ba7a076d0c898a1ef4520", + "revisionTime": "2017-08-19T22:39:52Z" + }, + { + "checksumSHA1": "jQh1fnoKPKMURvKkpdRjN695nAQ=", + "path": "github.com/agext/levenshtein", + "revision": "5f10fee965225ac1eecdc234c09daf5cd9e7f7b6", + "revisionTime": "2017-02-17T06:30:20Z" + }, { "checksumSHA1": "LLVyR2dAgkihu0+HdZF+JK0gMMs=", "path": "github.com/agl/ed25519", @@ -32,6 +92,12 @@ "revision": "f6af74d34d1ef69a511c59173876fc1174c11f0d", "revisionTime": "2016-08-26T14:30:32Z" }, + { + "checksumSHA1": "Ffhtm8iHH7l2ynVVOIGJE3eiuLA=", + "path": "github.com/apparentlymart/go-textseg/textseg", + "revision": "b836f5c4d331d1945a2fead7188db25432d73b69", + "revisionTime": "2017-05-31T20:39:52Z" + }, { "checksumSHA1": "l0iFqayYAaEip6Olaq3/LCOa/Sg=", "path": "github.com/armon/circbuf", @@ -45,700 +111,732 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "oB3qh+PpOMbr9frqJwMyBuUTf2k=", + "checksumSHA1": "YhARllgSKHk/9SL4UuyM7kpXFnk=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "DtuTqKH29YnLjrIJkRYX0HQtXY0=", "path": "github.com/aws/aws-sdk-go/aws/arn", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "n98FANpNeRT5kf6pizdpI7nm6Sw=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", 
- "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "ZdtYh3ZHSgP/WEIaqwJHTEhpkbs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "CxKCfgfTug8S/72YInl42ufsknE=", + "checksumSHA1": "28TXzVpyhT3JFRT3vLo/GTKc3Wc=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "n/tgGgh0wICYu+VDYSqlsRy4w9s=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": 
"ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "iywvraxbXf3A/FOzFWjKfBBEQRA=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "O6hcK24yI6w7FA+g4Pbr+eQ7pys=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": 
"github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "VCTh+dEaqqhog5ncy/WTt9+/gFM=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "Rpu8KBtHZgvhkwHxUfaky+qW+G4=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "BXuQo73qo4WLmc+TdE5pAwalLUQ=", "path": "github.com/aws/aws-sdk-go/service/acm", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "O2HA8qyuxiolOGr0a1o/mFKz/Xo=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "lnbmn3cDZvFOpzV9jKe9jG7VIPs=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": 
"v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "/C4QDOmEP7t8vawBcB7a2ygaXuI=", "path": "github.com/aws/aws-sdk-go/service/athena", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "eudoAl5P0HqsQrzytWyb9uhUGU0=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "fNHsdfsdTzwSJwnfxqqGdkjT5HE=", "path": "github.com/aws/aws-sdk-go/service/batch", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" + }, + { + "checksumSHA1": "s3YX7GouNkKz4U+w/Id7ivv5i2c=", + "path": "github.com/aws/aws-sdk-go/service/budgets", + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "XPE0OmyX5xPvxP7QSdGuB7TY7AE=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "rjq4ZWRUGvQDjnXeflafkCWYsFU=", + "checksumSHA1": "DOHt4R6H0di36yc9j7YEGYn78+k=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "59kV70/8PuutX4eFkGfCCtdunIA=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "zm2qDEBdcJSJI3PGcE2UVr6Cxmc=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "2y8MKiWNwIVw/au7LBxCGLZusUU=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": 
"885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "SGbv4ySdQtVIE7WJ+5HcNyWkbNY=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "EQj73ejMb8hAqCDC+hX4MuvJ7sk=", "path": "github.com/aws/aws-sdk-go/service/codebuild", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "XkyZPfxHxB3ohK+/qdF54nvMS0o=", + "checksumSHA1": "mr7qWrs0Aw9Pb84eyJL80Z7lxXc=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "QJYewZzcmPbuN27AWiQ6XKQ9Vrw=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "y1X50fCNI6EIs5YWddiu/EAwD8o=", "path": "github.com/aws/aws-sdk-go/service/codepipeline", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "7bm1eOY2oKYSesmEOd1qXvTRH2s=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentity", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "ouZ4H9foNRBFaElXZbpi5C3pqDQ=", "path": "github.com/aws/aws-sdk-go/service/cognitoidentityprovider", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "Ryg2zV9xVlpTF1GVbP5GlYbynOc=", + "checksumSHA1": "A+8ZG3eKa0jnOPfwhmCMw0C6U/o=", "path": "github.com/aws/aws-sdk-go/service/configservice", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": 
"v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "8UoYKgC9u/gjExZe4a6ZZf4dDl8=", + "checksumSHA1": "QWfHksqdET4wKa3eKEbewFC6Ues=", "path": "github.com/aws/aws-sdk-go/service/databasemigrationservice", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "S4vcGUr3SNl3XKPFTqfwFanT4Cw=", "path": "github.com/aws/aws-sdk-go/service/devicefarm", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" + }, + { + "checksumSHA1": "vJAA3F/3UhmdWX7s3r+5YxEUuUg=", + "path": "github.com/aws/aws-sdk-go/service/directconnect", + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "1O8BtYGfkXglIqgl/uxunrq3Djs=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "ycd0mxQjtWfbtlk+W2N7N6QdxFw=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "K3NOqhe9pZJM7qlMjYWR772txQY=", + "checksumSHA1": "vU/6dpCdnPMy35VFUju8+Dxn4sA=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "YNq7YhasHn9ceelWX2aG0Cg0Ga0=", + "checksumSHA1": "bovxt5atRBvLXGPHMFE1kPfTL0k=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "+Jyy6T5h4fYRUmU9EgV+ZtLNM80=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": 
"pBkeSL8bCEbz/6ub0Jr6NsjQPSc=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "4a4FO9D6RQluAwyYEW5H6p0Nz7I=", + "checksumSHA1": "X64z8R2OswIFi5btZRJUuPeGbio=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "RzBRmfKefl+oBHDzrtsuxv8ycKE=", + "checksumSHA1": "uyG5rBbcDDiRCN6gXLXyWy0oxpQ=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "lrrUXgK33baqXgfzh4Z2tSmxeBE=", + "checksumSHA1": "RiXeoiTqLn4uvNIm0EDbHuvYYFw=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "zMFn+iotI5JIiB9HFRo6BiX6CZE=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "sp5ZmzFBI5z1+kLkRoFjfm2GWuY=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "LjlrMBi+GR3kElFOfdV4WAJ10UU=", "path": "github.com/aws/aws-sdk-go/service/elbv2", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "EEj7cYvIK+F25RIynBVArzedyPQ=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "tuK++zhf2K0RBocTCU1ZltzS8ko=", "path": 
"github.com/aws/aws-sdk-go/service/firehose", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "Ewp675B6bZe/eWipwJl7OXqCJRo=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "a+tKLYUEM3ZftY52P2ywtIxlCxE=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "v0OUl6sTwF7EOmXyzk9ppc5ljLw=", "path": "github.com/aws/aws-sdk-go/service/inspector", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "D92k3ymegRbjpgnuAy9rWjgV7vs=", "path": "github.com/aws/aws-sdk-go/service/iot", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "fE6Lygzxvif/yfo9bncKyRUCqs8=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "wwFYsrpInh4Tow/vTI7bD+r5gBU=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "qu7WvUeSIFYQOqF9RXM/3w45kFg=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "eHMwitdHY0uOEA5kkmJ1nGHe0z0=", + "checksumSHA1": "HqzHXp/Jd+K1we+1q/K+zE4qyqw=", "path": "github.com/aws/aws-sdk-go/service/lightsail", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": 
"ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "84rKGr+RvT8tjOalWGPKuNPC4T4=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "xzvIMHbwgPpK4IdmzG07rZvb5x8=", + "checksumSHA1": "4im5y93mxsIQgXujPiM5iRtW0J4=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "dS8MjjzzX7csNyxiIpRPgHXMyss=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "Bx0UwkYHAU+LGL4Glg5le2UiirI=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "QPxEB1a4X55kNCF/GlH18mOrHMA=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "HQHIEdPu8wtbbJnECYl2HM9Ul5Q=", "path": "github.com/aws/aws-sdk-go/service/servicecatalog", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "57zqW/AnJTqgM3/udtLTrusRalM=", + "checksumSHA1": "oYKSvsZejNuttYCnuAzw6Lq33FY=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "u8xk+JtK/HwIKB2ume3or9Sstp8=", "path": "github.com/aws/aws-sdk-go/service/sfn", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" + }, + { + 
"checksumSHA1": "6DiA9x0ibBF3ZBnILXiugtdTufc=", + "path": "github.com/aws/aws-sdk-go/service/shield", + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "hOcVb1zJiDUmafXSJQhkEascWwA=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "1jN0ZDW5c9QEGSQIQ+KrbTACvm8=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "0y2X71gCB5ADXttecbyzuQy++HU=", + "checksumSHA1": "vo3NUCT5DQeQc52HHfht8sCuaA4=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "ODaF+6WgljmH1WhB3KmBdIz4C0U=", + "checksumSHA1": "jo8D8FG1KSYxqP5z9saasDziQLA=", "path": "github.com/aws/aws-sdk-go/service/ssm", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "MerduaV3PxtZAWvOGpgoBIglo38=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "vFinPalK+r0kaIbPMaHhFbTi2QQ=", + "checksumSHA1": "4uvhHpzIP35jxtM1Iy0RsUPOaDg=", "path": "github.com/aws/aws-sdk-go/service/waf", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" }, { - "checksumSHA1": "qe7HsjfCked/MaRvH2c0uJndWdM=", + "checksumSHA1": "I0siM+RU1WsSwc6Pd9e3sRei7zM=", "path": "github.com/aws/aws-sdk-go/service/wafregional", - "revision": "885962fbd7bf49bf62db54f63fb89f75611677cf", - "revisionTime": "2017-10-10T17:54:20Z", - "version": "v1.12.8", - "versionExact": "v1.12.8" + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + "version": "v1.12.19", + "versionExact": "v1.12.19" + }, + { + "checksumSHA1": "OHfwSU+p4N0Ft+BrzJfdE6AVRsY=", + "path": "github.com/aws/aws-sdk-go/service/workspaces", + "revision": "ee1f179877b2daf2aaabf71fa900773bf8842253", + "revisionTime": "2017-10-26T21:07:55Z", + 
"version": "v1.12.19", + "versionExact": "v1.12.19" }, { "checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=", @@ -764,12 +862,102 @@ "revision": "41eea22f717c616615e1e59aa06cf831f9901f35", "revisionTime": "2017-03-13T23:49:21Z" }, + { + "checksumSHA1": "7BC2/27NId9xaPDB5w3nWN2mn9A=", + "path": "github.com/coreos/etcd/auth/authpb", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "XtVAwbJWD12FGuZrIkxpe8t9TB8=", + "path": "github.com/coreos/etcd/client", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "4m3qRCZfmTyRSzd4eH8ovMb5OXY=", + "path": "github.com/coreos/etcd/clientv3", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "LpOgTec6cz2Tf3zDav7VkqMHmBM=", + "path": "github.com/coreos/etcd/clientv3/concurrency", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "VMC9J0rMVk3Fv8r8Bj7qqLlXc3E=", + "path": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "c0ltvGUOnk8qaEshFwc0PDH5nbc=", + "path": "github.com/coreos/etcd/etcdserver/etcdserverpb", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "JAkX9DfIBrSe0vUa07xl5cikxVQ=", + "path": "github.com/coreos/etcd/mvcc/mvccpb", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "mKIXx1kDwmVmdIpZ3pJtRBuUKso=", + "path": "github.com/coreos/etcd/pkg/pathutil", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "z+C4BtPa8wbOUKW5dmHyhNnTulg=", + "path": "github.com/coreos/etcd/pkg/srv", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "rMyIh9PsSvPs6Yd+YgKITQzQJx8=", + "path": "github.com/coreos/etcd/pkg/tlsutil", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "agofzi+YZ7VYbxCldLaHYHAtlpc=", + "path": "github.com/coreos/etcd/pkg/transport", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "gx1gJIMU6T0UNQ0bPZ/drQ8cpCI=", + "path": "github.com/coreos/etcd/pkg/types", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "sp2FkEyaIGiQFOEZCTDkBZgyHOs=", + "path": "github.com/coreos/etcd/version", + "revision": "20f2914e1301abde233b2039e110f2be57b91f89", + "revisionTime": "2017-10-26T00:52:00Z" + }, + { + "checksumSHA1": "97BsbXOiZ8+Kr+LIuZkQFtSj7H4=", + "path": "github.com/coreos/go-semver/semver", + "revision": "1817cd4bea52af76542157eeabd74b057d1a199e", + "revisionTime": "2017-06-13T09:22:38Z" + }, { "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", "path": "github.com/davecgh/go-spew/spew", "revision": "346938d642f2ec3594ed81d874461961cd0faa76", "revisionTime": "2016-10-29T20:57:26Z" }, + { + "checksumSHA1": "+TKtBzv23ywvmmqRiGEjUba4YmI=", + "path": "github.com/dgrijalva/jwt-go", + "revision": "dbeaa9332f19a944acb5736b4456cfcc02140e29", + "revisionTime": "2017-10-19T21:57:19Z" + }, { "checksumSHA1": 
"GCskdwYAPW2S34918Z5CgNMJ2Wc=", "path": "github.com/dylanmei/iso8601", @@ -796,6 +984,366 @@ "revision": "766e555c68dc8bda90d197ee8946c37519c19409", "revisionTime": "2017-01-17T13:00:17Z" }, + { + "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=", + "path": "github.com/golang/protobuf/proto", + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" + }, + { + "checksumSHA1": "VfkiItDBFFkZluaAMAzJipDXNBY=", + "path": "github.com/golang/protobuf/ptypes", + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" + }, + { + "checksumSHA1": "UB9scpDxeFjQe5tEthuR4zCLRu4=", + "path": "github.com/golang/protobuf/ptypes/any", + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" + }, + { + "checksumSHA1": "hUjAj0dheFVDl84BAnSWj9qy2iY=", + "path": "github.com/golang/protobuf/ptypes/duration", + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" + }, + { + "checksumSHA1": "O2ItP5rmfrgxPufhjJXbFlXuyL8=", + "path": "github.com/golang/protobuf/ptypes/timestamp", + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" + }, + { + "checksumSHA1": "Zcapl92yLsRGDnDfmD5BMZrU0i8=", + "path": "github.com/gophercloud/gophercloud", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "b7g9TcU1OmW7e2UySYeOAmcfHpY=", + "path": "github.com/gophercloud/gophercloud/internal", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "0rCdHaLp8s8+UK/Uk/7E3qsLtPI=", + "path": "github.com/gophercloud/gophercloud/openstack", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "xO3K2ugrp3x0VQQ+230flEPMFLs=", + "path": "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "lfD3gq0G0BLLrwS3/7IE2SabPXI=", + "path": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "B4IXSmq364HcBruvvV0QjDFxZgc=", + "path": "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "HXIWMjezz3bJS/U+AczZ5YkDT3E=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "kCHEEeRVZeR1LhbvNP+WyvB8z2s=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "vFS5BwnCdQIfKm1nNWrR+ijsAZA=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "jOyPWAJGRqLHofGLL/aiYBZzJR0=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs", + "revision": 
"22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "+hlElX7o8ULWTc0r7oGyDlOnwWM=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "Z8X3+qVpl9EImYGJ5tJ9Pxn3qqc=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "PWbrce9A6LMWIGr0fK04AEEuQec=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "qfVZltu1fYTYXS97WbjeLuLPgUc=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "+Gif+WFd0WVjefjvmlR7jyTrdzQ=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "TMkBTIrYeL78yJ6wZNJQqG0tHR4=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "NwHRS4gvS7xcXlWRT8WpJbBbvbs=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "CSnfH01hSas0bdc/3m/f5Rt6SFY=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/images", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "BzQbqNKKDkIHN+1G6sRsMeWz4Zs=", + "path": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "hBVpoXsfRy6beVIhE6tcmtvgx+s=", + "path": "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "VxvzmFE0VrKXbmVvSN5dZ+w2zPE=", + "path": "github.com/gophercloud/gophercloud/openstack/dns/v2/zones", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "oOJkelRgWx0NzUmxuI3kTS27gM0=", + "path": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "z5NsqMZX3TLMzpmwzOOXE4M5D9w=", + "path": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "ov1ZPU9SK5I1l733CprPgALenhc=", + "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/groups", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "nasom1r/2W2OnR//mipNfasWv5U=", + "path": 
"github.com/gophercloud/gophercloud/openstack/identity/v3/projects", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "HzVyehPh0jvNZpL5iodoWpUd46k=", + "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "qEjy/34oEk+X1l96ii8ot8vK47g=", + "path": "github.com/gophercloud/gophercloud/openstack/identity/v3/users", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "I7QtHxTYPCZybsi+u/J5AQaNxxQ=", + "path": "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "gnDqkD2hdbV0ek3m2eNgsc4opdg=", + "path": "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "eLeGhmXLNp+j9PBTjLhpHY+3YxE=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "QuUP9C0vhUMQx6OUgekqjn6/3xs=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "FDgt1WaVGbp8w6vyCJbtkbsbWBc=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "hanDdNBxWh1gdmyKl6QhytbprYc=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "I6Q1xkqwqx6yL9MU1oMKfSozH2U=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "QZx6LgtiiVOYViHwIUo8SvBGDW4=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "HiGzZXGIxrItijPCH+L7EbbCb9w=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "KZJLk70d5SX8TQO1q0MLAxGEBCU=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "KXKo/TByIHelRDES1IB01nlQMoM=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "3QfwJnsJHohoYV16/BOtDXpNEYI=", + "path": 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "yFJA+pHR+xjch66bvrKhQqVFlf0=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "HPfnRxqVgYpr3xeGjBhfDcA5wEw=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "jNrZgB/ulP49gM7LOl4tkXQNlv8=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "eHPa8JiswxlLbASzyur/kPn8ewc=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "eUvwdXvz/Zkckr/isvP+2nXY2J8=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "fxOov7uyY0I8Y/LlwoFVfVnWrbA=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "ja7FbTK5eFj577HM5YxeQ8qWQ+I=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "hmTj0vghsx3PEnkk3b3NUXWC2Ew=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/networks", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "68RL/R3cHJvkHPNZmu0GW4xd4cY=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/ports", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "P6Wvbidr+AvcKL0qDC7Dmpy8nyM=", + "path": "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "Q9VHGSztw2B4PmN4kyvY9TcJD6c=", + "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "/ooNeOLNs+Z2Ez6O9oqnfY5n1H8=", + "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "F1gaBPZj2MGwEF7u8GARRa6XlvQ=", + "path": "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "aODoF15ZwA4XJOWciguaAJRq/6o=", + "path": 
"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "hOTxf5bdm8+cwSTO+OO/maDz6ss=", + "path": "github.com/gophercloud/gophercloud/openstack/utils", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, + { + "checksumSHA1": "YspETi3tOMvawKIT91HyuqaA5lM=", + "path": "github.com/gophercloud/gophercloud/pagination", + "revision": "22606ab5bb8999ef1dc7435304e252d302a931ea", + "revisionTime": "2017-10-26T02:45:19Z" + }, { "checksumSHA1": "FUiF2WLrih0JdHsUTMMDz3DRokw=", "path": "github.com/hashicorp/atlas-go/archive", @@ -808,6 +1356,12 @@ "revision": "1ea2527d6a49b4c9f18753aa21627aae6a7c26a1", "revisionTime": "2017-06-19T22:02:35Z" }, + { + "checksumSHA1": "4hc6jGp9/1m7dp5ACRcGnspjO5E=", + "path": "github.com/hashicorp/consul/api", + "revision": "b31cfaaf2ac5ddd6ddee1d2e1041466e939dc097", + "revisionTime": "2017-10-25T22:34:43Z" + }, { "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=", "path": "github.com/hashicorp/errwrap", @@ -826,10 +1380,10 @@ "revisionTime": "2017-02-11T01:34:15Z" }, { - "checksumSHA1": "nsL2kI426RMuq1jw15e7igFqdIY=", + "checksumSHA1": "9VcI9QGCShWIUIL187qRd4sxwb8=", "path": "github.com/hashicorp/go-getter", - "revision": "c3d66e76678dce180a7b452653472f949aedfbcd", - "revisionTime": "2017-02-07T21:55:32Z" + "revision": "a686900cb3753aa644dc4812be91ceaf9fdd3b98", + "revisionTime": "2017-09-22T19:29:48Z" }, { "checksumSHA1": "9J+kDr29yDrwsdu2ULzewmqGjpA=", @@ -837,16 +1391,28 @@ "revision": "c3d66e76678dce180a7b452653472f949aedfbcd", "revisionTime": "2017-02-07T21:55:32Z" }, + { + "checksumSHA1": "AA0aYmdg4pb5gPCUSXg8iPzxLag=", + "path": "github.com/hashicorp/go-hclog", + "revision": "ca137eb4b4389c9bc6f1a6d887f056bf16c00510", + "revisionTime": "2017-10-05T15:17:51Z" + }, { "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=", "path": "github.com/hashicorp/go-multierror", "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" }, { - "checksumSHA1": "b0nQutPMJHeUmz4SjpreotAo6Yk=", + "checksumSHA1": "R6me0jVmcT/OPo80Fe0qo5fRwHc=", "path": "github.com/hashicorp/go-plugin", - "revision": "f72692aebca2008343a9deb06ddb4b17f7051c15", - "revisionTime": "2017-02-17T16:27:05Z" + "revision": "a5174f84d7f8ff00fb07ab4ef1f380d32eee0e63", + "revisionTime": "2017-08-16T15:18:19Z" + }, + { + "checksumSHA1": "yzoWV7yrS/TvOrKy5ZrdUjsYaOA=", + "path": "github.com/hashicorp/go-retryablehttp", + "revision": "794af36148bf63c118d6db80eb902a136b907e71", + "revisionTime": "2017-08-24T18:08:59Z" }, { "checksumSHA1": "A1PcINvF3UiwHRKn8UcgARgvGRs=", @@ -931,6 +1497,42 @@ "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855", "revisionTime": "2017-05-04T19:02:34Z" }, + { + "checksumSHA1": "6kxMiZSmgazD/CZgmnEeEMJSAOM=", + "path": "github.com/hashicorp/hcl2/gohcl", + "revision": "44bad6dbf5490f5da17ec991e664df3d017b706f", + "revisionTime": "2017-10-03T23:27:34Z" + }, + { + "checksumSHA1": "TsNlThzf92FMwcnM4Fc0mArHroU=", + "path": "github.com/hashicorp/hcl2/hcl", + "revision": "44bad6dbf5490f5da17ec991e664df3d017b706f", + "revisionTime": "2017-10-03T23:27:34Z" + }, + { + "checksumSHA1": "+Dv8V2cfl7Vy6rUklhXj5Cli8aU=", + "path": "github.com/hashicorp/hcl2/hcl/hclsyntax", + "revision": "44bad6dbf5490f5da17ec991e664df3d017b706f", + "revisionTime": "2017-10-03T23:27:34Z" + }, + { + "checksumSHA1": "GAArMzjaoFNPa7HFnhjZmaeBZII=", + "path": "github.com/hashicorp/hcl2/hcl/json", + "revision": 
"44bad6dbf5490f5da17ec991e664df3d017b706f", + "revisionTime": "2017-10-03T23:27:34Z" + }, + { + "checksumSHA1": "u6YPoPz3GflgHb1dN1YN8nCWAXY=", + "path": "github.com/hashicorp/hcl2/hcldec", + "revision": "44bad6dbf5490f5da17ec991e664df3d017b706f", + "revisionTime": "2017-10-03T23:27:34Z" + }, + { + "checksumSHA1": "IzmftuG99BqNhbFGhxZaGwtiMtM=", + "path": "github.com/hashicorp/hcl2/hclparse", + "revision": "44bad6dbf5490f5da17ec991e664df3d017b706f", + "revisionTime": "2017-10-03T23:27:34Z" + }, { "checksumSHA1": "M09yxoBoCEtG7EcHR8aEWLzMMJc=", "path": "github.com/hashicorp/hil", @@ -962,250 +1564,376 @@ "revisionTime": "2015-06-09T07:04:31Z" }, { - "checksumSHA1": "D0F12Q8yZNxF9cSQ/HRAVN34ZwQ=", + "checksumSHA1": "mS15CkImPzXYsgNwl3Mt9Gh3Vb0=", + "path": "github.com/hashicorp/serf/coordinate", + "revision": "c20a0b1b1ea9eb8168bcdec0116688fa9254e449", + "revisionTime": "2017-10-22T02:00:50Z" + }, + { + "checksumSHA1": "Qm+qY3ySdGhiM6TIz9C/U5xNxV8=", "path": "github.com/hashicorp/terraform", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "/x8LyJMq8kPYSRNu4xyB8zcf9DQ=", + "checksumSHA1": "2kIaes8QS4QFlSx8CZLXzbdj0UM=", + "path": "github.com/hashicorp/terraform/backend", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "ZWqZhZxaT2AMNy4dzCcvMKc46GY=", + "path": "github.com/hashicorp/terraform/backend/atlas", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "OGwHq8xlYnARuHS6JsSbjPBSe6k=", + "path": "github.com/hashicorp/terraform/backend/init", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "ISwgLoSPkcEYAcwFoYu5FNsMDD0=", + "path": "github.com/hashicorp/terraform/backend/legacy", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "TcfzyCcc96McmcGVl+D5Z6XftqQ=", + "path": "github.com/hashicorp/terraform/backend/local", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "sWPPRyIsfATpFxTqRc9PgbazRAc=", + "path": "github.com/hashicorp/terraform/backend/remote-state/azure", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "hs39fP+wdfuvpN/lsMpYwUZUV8I=", + "path": "github.com/hashicorp/terraform/backend/remote-state/consul", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "SllujprNPMotiPKfcPsQRF/7r64=", + "path": "github.com/hashicorp/terraform/backend/remote-state/etcdv3", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "uJi6XL6OFIzU0r3G0YX0L3YzxRE=", + "path": "github.com/hashicorp/terraform/backend/remote-state/inmem", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "ivPHguhB+k/yL7SkAfXzObobWnc=", + "path": "github.com/hashicorp/terraform/backend/remote-state/s3", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "l0SZPCxWxxlYHOedkUCZUCWw4R0=", + "path": 
"github.com/hashicorp/terraform/backend/remote-state/swift", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "RCVWlxGP1rZsVKT8VqSgyWhAte4=", "path": "github.com/hashicorp/terraform/builtin/provisioners/chef", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "EtcHzH4aXhylC1Uu8yBQis6IzfU=", "path": "github.com/hashicorp/terraform/builtin/provisioners/file", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "cqsxeQ91qnrGUicLBKoDZCuozMM=", + "checksumSHA1": "IWoLiBpleo7Ndc8ECqZS6p+fsGY=", "path": "github.com/hashicorp/terraform/builtin/provisioners/local-exec", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "OChAYHSZ6OJKTCQVMgpaIz5MEMA=", + "checksumSHA1": "mprmWWmGibkrqOCI66VJyVNTHaM=", "path": "github.com/hashicorp/terraform/builtin/provisioners/remote-exec", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "RuHDe6nFcPIhiFEsrJ+Qzzlu74U=", + "checksumSHA1": "dzl4u25a9uL1AQaDkuj9yEYQL9U=", + "path": "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "JOazEFlo2zRSKLa/M9ITFQDFCLA=", "path": "github.com/hashicorp/terraform/command", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "HWbnuaEFdfRFeKxZdlYUWZm+DU0=", "path": "github.com/hashicorp/terraform/command/clistate", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "Nf4HkguPGljvTOOXidsSLYEAMOM=", + "checksumSHA1": "ezmArCBoyFTTvlskRVCOlJ6dhB8=", "path": "github.com/hashicorp/terraform/command/format", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "Pg0fge6Fl6a34pYl2fH1eb6kgNE=", "path": "github.com/hashicorp/terraform/communicator", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "zCAC53a+zRYTwnfw7vUFJmvqxQc=", "path": "github.com/hashicorp/terraform/communicator/remote", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "gOdZ52GCuL8KLiqYGEVNVZyMO5U=", "path": "github.com/hashicorp/terraform/communicator/shared", 
- "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "go3oS7xtI1wIdMv0XyhEt33dA0Y=", + "checksumSHA1": "/GfjH+TwNG39FcII4/D7K5h7yq4=", "path": "github.com/hashicorp/terraform/communicator/ssh", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "OpE2PWCmoN2WCmWGGVnoeCaeOTQ=", + "checksumSHA1": "ishkSV98ykhx7ZA9Q/lZgYChZms=", "path": "github.com/hashicorp/terraform/communicator/winrm", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "KPrCMDPNcLmO7K6xPcJSl86LwPk=", + "checksumSHA1": "ah/6YQGMt2zKafrWg1HTGwriKgA=", "path": "github.com/hashicorp/terraform/config", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "qzvSGXa0rLkhSSha9fZQkfk6UG4=", + "path": "github.com/hashicorp/terraform/config/configschema", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "3V7300kyZF+AGy/cOKV0+P6M3LY=", + "path": "github.com/hashicorp/terraform/config/hcl2shim", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "uPCJ6seQo9kvoNSfwNWKX9KzVMk=", + "checksumSHA1": "lLno16hjAz3FBAVp0U2+ZWZf/zc=", "path": "github.com/hashicorp/terraform/config/module", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "w+l+UGTmwYNJ+L0p2vTd6+yqjok=", + "checksumSHA1": "mPbjVPD2enEey45bP4M83W2AxlY=", "path": "github.com/hashicorp/terraform/dag", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "P8gNPDuOzmiK4Lz9xG7OBy4Rlm8=", "path": "github.com/hashicorp/terraform/flatmap", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "zx5DLo5aV0xDqxGTzSibXg7HHAA=", "path": "github.com/hashicorp/terraform/helper/acctest", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "uT6Q9RdSRAkDjyUgQlJ2XKJRab4=", "path": "github.com/hashicorp/terraform/helper/config", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "FH5eOEHfHgdxPC/JnfmCeSBk66U=", "path": "github.com/hashicorp/terraform/helper/encryption", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + 
"revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "Vbo55GDzPgG/L/+W2pcvDhxrPZc=", "path": "github.com/hashicorp/terraform/helper/experiment", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "BmIPKTr0zDutSJdyq7pYXrK1I3E=", + "checksumSHA1": "KNvbU1r5jv0CBeQLnEtDoL3dRtc=", "path": "github.com/hashicorp/terraform/helper/hashcode", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "B267stWNQd0/pBTXHfI/tJsxzfc=", "path": "github.com/hashicorp/terraform/helper/hilmapstructure", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "2wJa9F3BGlbe2DNqH5lb5POayRI=", + "checksumSHA1": "BAXV9ruAyno3aFgwYI2/wWzB2Gc=", "path": "github.com/hashicorp/terraform/helper/logging", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "twkFd4x71kBnDfrdqO5nhs8dMOY=", "path": "github.com/hashicorp/terraform/helper/mutexkv", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "ImyqbHM/xe3eAT2moIjLI8ksuks=", "path": "github.com/hashicorp/terraform/helper/pathorcontents", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "dhU2woQaSEI2OnbYLdkHxf7/nu8=", + "checksumSHA1": "FcseqawEPgboKzuR7HsGegueV5E=", "path": "github.com/hashicorp/terraform/helper/resource", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "oaQ6rBNVs4Ae86vv3fKZU4tWETM=", + "checksumSHA1": "SN11/eEsMMUvrUSaO2pmuHDSMIw=", "path": "github.com/hashicorp/terraform/helper/schema", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "1yCGh/Wl4H4ODBBRmIRFcV025b0=", "path": "github.com/hashicorp/terraform/helper/shadow", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "eQ6F8nDi/R+F/SX51xCEY8iPZOE=", "path": "github.com/hashicorp/terraform/helper/slowmessage", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "Fzbv+N7hFXOtrR6E7ZcHT3jEE9s=", "path": "github.com/hashicorp/terraform/helper/structure", - "revision": 
"d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "qsI85GvNukAIUv+kcy8CdgP7um8=", + "checksumSHA1": "jdwWpJZbTSU87GUlwLTuf6FwpmE=", "path": "github.com/hashicorp/terraform/helper/validation", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "a1YCqJht+4G5O0UNTnOTD8vfXb0=", "path": "github.com/hashicorp/terraform/helper/variables", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "ExvF2RbMeCfxuq2eASmOChEcRgQ=", "path": "github.com/hashicorp/terraform/helper/wrappedreadline", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "q96i9foHLGSZ+9dFOV7jUseq7zs=", "path": "github.com/hashicorp/terraform/helper/wrappedstreams", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "yFWmdS6yEJZpRJzUqd/mULqCYGk=", "path": "github.com/hashicorp/terraform/moduledeps", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "4ODNVUds3lyBf7gV02X1EeYR4GA=", + "checksumSHA1": "DqaoG++NXRCfvH/OloneLWrM+3k=", "path": "github.com/hashicorp/terraform/plugin", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "mujz3BDg1X82ynvJncCFUT6/7XI=", + "checksumSHA1": "hg+RFX/sPm8mjsf0p2AiNyunp+w=", "path": "github.com/hashicorp/terraform/plugin/discovery", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "vW75JRFcEDJNxNCB2mrlFeYOyX4=", "path": "github.com/hashicorp/terraform/repl", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "6GDMHApWhgYT4AVkhSBknxK0YyU=", + "checksumSHA1": "3vkhsjnBn8rOoO5bW1R4lPtckVE=", "path": "github.com/hashicorp/terraform/state", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "SzTfctD0pKb9+buLgoHquobyG84=", + "path": "github.com/hashicorp/terraform/state/remote", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "VXlzRRDVOqeMvnnrbUcR9H64OA4=", + "path": "github.com/hashicorp/terraform/svchost", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + 
"revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "GzcKNlFL0N77JVjU8qbltXE4R3k=", + "path": "github.com/hashicorp/terraform/svchost/auth", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { - "checksumSHA1": "ksfNQjZs/6llziARojABd6iuvdw=", + "checksumSHA1": "yCFtkUTmOoPT/IyOjBwPIXYjQAc=", + "path": "github.com/hashicorp/terraform/svchost/disco", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "ftaJwH2TwqV/a/XKfJVMFjNmVRo=", "path": "github.com/hashicorp/terraform/terraform", - "revision": "d969f97e730d527a568a0929ce52df70e062bca0", - "revisionTime": "2017-08-03T21:53:07Z" + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "C3c1+sTF/97mT6N+15bVSq4Ryr8=", + "path": "github.com/hashicorp/terraform/tfdiags", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" + }, + { + "checksumSHA1": "HY66V8hbkC5EvjI/Faix9gYQfq8=", + "path": "github.com/hashicorp/terraform/version", + "revision": "ddff8bbc00bfcfbb7370de072dff3ad0055b0c5f", + "revisionTime": "2017-10-26T14:57:19Z" }, { "checksumSHA1": "ft77GtqeZEeCXioGpF/s6DlGm/U=", @@ -1244,6 +1972,48 @@ "revision": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d", "revisionTime": "2016-08-03T19:07:31Z" }, + { + "checksumSHA1": "NOwNdnb70M6s9LvhaPFabBVwlBs=", + "path": "github.com/joyent/gocommon", + "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", + "revisionTime": "2016-12-02T19:23:17Z" + }, + { + "checksumSHA1": "O0WFPpYSMzeDL11yO84IYBSXrmk=", + "path": "github.com/joyent/gocommon/client", + "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", + "revisionTime": "2016-12-02T19:23:17Z" + }, + { + "checksumSHA1": "fsnHUI4h0h1BUeiWNhkRJhzZD/s=", + "path": "github.com/joyent/gocommon/errors", + "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", + "revisionTime": "2016-12-02T19:23:17Z" + }, + { + "checksumSHA1": "cz4amcSofbyq0dH1sdOHNUvznWw=", + "path": "github.com/joyent/gocommon/http", + "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", + "revisionTime": "2016-12-02T19:23:17Z" + }, + { + "checksumSHA1": "yMJdr6tmhoSgkjLjfyeDZL6vuXo=", + "path": "github.com/joyent/gocommon/jpc", + "revision": "b78708995d1c2ebdb64a3061b0bca5d8ccdf0fc2", + "revisionTime": "2016-12-02T19:23:17Z" + }, + { + "checksumSHA1": "ysHPYOB/xV0dMIAES8ZOQu9KwQQ=", + "path": "github.com/joyent/gomanta/manta", + "revision": "41c8bc3faef9789454337062b5fac84699f75834", + "revisionTime": "2016-12-02T19:23:08Z" + }, + { + "checksumSHA1": "N0NRIcJF7aj1wd56DA1N9GpYq/4=", + "path": "github.com/joyent/gosign/auth", + "revision": "9abcee278795b82b36858cdfc857c8a0e7de797c", + "revisionTime": "2016-11-14T19:17:44Z" + }, { "checksumSHA1": "gEjGS03N1eysvpQ+FCHTxPcbxXc=", "path": "github.com/kardianos/osext", @@ -1304,6 +2074,12 @@ "revision": "93f5b35093ba15e0f86e412cc5c767d5c10c15fd", "revisionTime": "2016-10-04T15:35:44Z" }, + { + "checksumSHA1": "90/rTD6BDXVZZSY3Qeqnrg9Wb10=", + "path": "github.com/lusis/go-artifactory/src/artifactory.v401", + "revision": "da4b7cd952125757a375908730ba9445adc5ba99", + "revisionTime": "2017-10-20T17:55:49Z" + }, { "checksumSHA1": "DlpjiTUHFFoU39yh4FIKS8g7L7A=", "path": "github.com/masterzen/azure-sdk-for-go/core/http", @@ -1388,6 +2164,18 @@ "revision": "07bab5fdd9580500aea6ada0e09df4aa28e68abd", "revisionTime": "2014-10-13T18:55:33Z" }, + { + "checksumSHA1": "bDdhmDk8q6utWrccBhEOa6IoGkE=", + 
"path": "github.com/mitchellh/go-testing-interface", + "revision": "a61a99592b77c9ba629d254a693acffaeb4b7e28", + "revisionTime": "2017-10-04T22:19:16Z" + }, + { + "checksumSHA1": "L3leymg2RT8hFl5uL+5KP/LpBkg=", + "path": "github.com/mitchellh/go-wordwrap", + "revision": "ad45545899c7b13c020ea92b2072220eefad42b8", + "revisionTime": "2015-03-14T17:03:34Z" + }, { "checksumSHA1": "xyoJKalfQwTUN1qzZGQKWYAwl0A=", "path": "github.com/mitchellh/hashstructure", @@ -1477,6 +2265,12 @@ "revision": "b061729afc07e77a8aa4fad0a2fd840958f1942a", "revisionTime": "2016-09-27T10:08:44Z" }, + { + "checksumSHA1": "q12HCybmDTjU2efDmyBOStiZ9/4=", + "path": "github.com/terraform-providers/terraform-provider-openstack/openstack", + "revision": "635bc49af8a8270ec645f085e0652ea3b1bb200e", + "revisionTime": "2017-10-23T15:34:37Z" + }, { "checksumSHA1": "EUR26b2t3XDPxiEMwDBtn8Ajp8A=", "path": "github.com/terraform-providers/terraform-provider-template/template", @@ -1493,6 +2287,36 @@ "version": "v0.1.0", "versionExact": "v0.1.0" }, + { + "checksumSHA1": "IJYn37+lvgZhQcErB0J4s1qvBds=", + "path": "github.com/ugorji/go/codec", + "revision": "16373bd7016380b084e8a4e8f9560efe82cbc98c", + "revisionTime": "2017-10-26T02:16:19Z" + }, + { + "checksumSHA1": "qgMa75aMGbkFY0jIqqqgVnCUoNA=", + "path": "github.com/ulikunitz/xz", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "vjnTkzNrMs5Xj6so/fq0mQ6dT1c=", + "path": "github.com/ulikunitz/xz/internal/hash", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "m0pm57ASBK/CTdmC0ppRHO17mBs=", + "path": "github.com/ulikunitz/xz/internal/xlog", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, + { + "checksumSHA1": "2vZw6zc8xuNlyVz2QKvdlNSZQ1U=", + "path": "github.com/ulikunitz/xz/lzma", + "revision": "0c6b41e72360850ca4f98dc341fd999726ea007f", + "revisionTime": "2017-06-05T21:53:11Z" + }, { "checksumSHA1": "iHiMTBffQvWYlOLu3130JXuQpgQ=", "path": "github.com/xanzy/ssh-agent", @@ -1505,6 +2329,48 @@ "revision": "1d6e342255576c977e946a2384fc487a22d3fceb", "revisionTime": "2016-10-29T10:40:18Z" }, + { + "checksumSHA1": "R9ayYqxeUsPcIbs6KXCVwDIdf6M=", + "path": "github.com/zclconf/go-cty/cty", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, + { + "checksumSHA1": "IjvfMUZ9S1L1NM0haXwMfKzkyvM=", + "path": "github.com/zclconf/go-cty/cty/convert", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, + { + "checksumSHA1": "TU21yqpRZdbEbH8pp4I5YsQa00E=", + "path": "github.com/zclconf/go-cty/cty/function", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, + { + "checksumSHA1": "Ke4kpRBTSophcLSCrusR8XxSC0Y=", + "path": "github.com/zclconf/go-cty/cty/function/stdlib", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, + { + "checksumSHA1": "tmCzwfNXOEB1sSO7TKVzilb2vjA=", + "path": "github.com/zclconf/go-cty/cty/gocty", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, + { + "checksumSHA1": "1ApmO+Q33+Oem/3f6BU6sztJWNc=", + "path": "github.com/zclconf/go-cty/cty/json", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, + { + "checksumSHA1": "gH4rRyzIQknMIXAJfpvC04KTsME=", + "path": 
"github.com/zclconf/go-cty/cty/set", + "revision": "8bf222d6d03b7b336d013978f3acbfd877da428f", + "revisionTime": "2017-10-13T21:58:09Z" + }, { "checksumSHA1": "vE43s37+4CJ2CDU6TlOUOYE0K9c=", "path": "golang.org/x/crypto/bcrypt", @@ -1595,6 +2461,18 @@ "revision": "42ff06aea7c329876e5a0fe94acc96902accf0ad", "revisionTime": "2017-08-03T14:09:35Z" }, + { + "checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=", + "path": "golang.org/x/net/context", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", + "path": "golang.org/x/net/context/ctxhttp", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, { "checksumSHA1": "vqc3a+oTUGX8PmD0TS+qQ7gmN8I=", "path": "golang.org/x/net/html", @@ -1607,12 +2485,312 @@ "revision": "f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f", "revisionTime": "2017-07-19T21:11:51Z" }, + { + "checksumSHA1": "SHTyxlWxNjRwA7o3AiBM87PawSA=", + "path": "golang.org/x/net/http2", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "ezWhc7n/FtqkLDQKeU2JbW+80tE=", + "path": "golang.org/x/net/http2/hpack", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "H181RWl7GTS90aCWW6v4atV3pAg=", + "path": "golang.org/x/net/idna", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "UxahDzW2v4mf/+aFxruuupaoIwo=", + "path": "golang.org/x/net/internal/timeseries", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "3xyuaSNmClqG4YWC7g0isQIbUTc=", + "path": "golang.org/x/net/lex/httplex", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "u/r66lwYfgg682u5hZG7/E7+VCY=", + "path": "golang.org/x/net/trace", + "revision": "4b14673ba32bee7f5ac0f990a48f033919fd418b", + "revisionTime": "2017-09-15T10:16:46Z" + }, + { + "checksumSHA1": "6AmgST478RkzWypvthanPXFDTrw=", + "path": "golang.org/x/oauth2", + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" + }, + { + "checksumSHA1": "JTBn9MQUhwHtjwv7rC9Zg4KRN7g=", + "path": "golang.org/x/oauth2/google", + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" + }, + { + "checksumSHA1": "4XCNzl3cgGtdd0SrUZHSBdsW+m4=", + "path": "golang.org/x/oauth2/internal", + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" + }, + { + "checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=", + "path": "golang.org/x/oauth2/jws", + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" + }, + { + "checksumSHA1": "/eV4E08BY+f1ZikiR7OOMJAj3m0=", + "path": "golang.org/x/oauth2/jwt", + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" + }, { "checksumSHA1": "NqlOcIqzDg46JUFFXqZdKUER0B0=", "path": "golang.org/x/sys/unix", "revision": "d8f5ea21b9295e315e612b4bcf4bedea93454d4d", "revisionTime": "2017-08-03T09:04:06Z" }, + { + "checksumSHA1": "tltivJ/uj/lqLk05IqGfCv2F/E8=", + "path": "golang.org/x/text/secure/bidirule", + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" + }, + { + "checksumSHA1": 
"ziMb9+ANGRJSSIuxYdRbA+cDRBQ=", + "path": "golang.org/x/text/transform", + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" + }, + { + "checksumSHA1": "iB6/RoQIzBaZxVi+t7tzbkwZTlo=", + "path": "golang.org/x/text/unicode/bidi", + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" + }, + { + "checksumSHA1": "km/8bLtOpIP7sua4MnEmiSDYTAE=", + "path": "golang.org/x/text/unicode/norm", + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" + }, + { + "checksumSHA1": "/y0saWnM+kTnSvZrNlvoNOgj0Uo=", + "path": "google.golang.org/api/gensupport", + "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", + "revisionTime": "2017-10-26T00:03:36Z" + }, + { + "checksumSHA1": "BWKmb7kGYbfbvXO6E7tCpTh9zKE=", + "path": "google.golang.org/api/googleapi", + "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", + "revisionTime": "2017-10-26T00:03:36Z" + }, + { + "checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=", + "path": "google.golang.org/api/googleapi/internal/uritemplates", + "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", + "revisionTime": "2017-10-26T00:03:36Z" + }, + { + "checksumSHA1": "Zd7ojgrWPn3j7fLx9HB6/Oub8lE=", + "path": "google.golang.org/api/storage/v1", + "revision": "c5d94a018003dc535877ebf36b0ed720c690202a", + "revisionTime": "2017-10-26T00:03:36Z" + }, + { + "checksumSHA1": "WPEbk80NB3Esdh4Yk0PXr2K7xVU=", + "path": "google.golang.org/appengine", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "/XD6hF+tqSNrfPrFmukDeHKFVVA=", + "path": "google.golang.org/appengine/internal", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=", + "path": "google.golang.org/appengine/internal/app_identity", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=", + "path": "google.golang.org/appengine/internal/base", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=", + "path": "google.golang.org/appengine/internal/datastore", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=", + "path": "google.golang.org/appengine/internal/log", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=", + "path": "google.golang.org/appengine/internal/modules", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=", + "path": "google.golang.org/appengine/internal/remote_api", + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" + }, + { + "checksumSHA1": "Tc3BU26zThLzcyqbVtiSEp7EpU8=", + "path": "google.golang.org/genproto/googleapis/rpc/status", + "revision": "f676e0f3ac6395ff1a529ae59a6670878a8371a6", + "revisionTime": "2017-10-02T23:26:14Z" + }, + { + "checksumSHA1": "7JepG/D1rIwHGPYpcTUc5RLdNiQ=", + "path": "google.golang.org/grpc", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + 
"checksumSHA1": "HoJvHF9RxOinJPAAbAhfZSNUxBY=", + "path": "google.golang.org/grpc/balancer", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "os98urLvZVriKRbHhIsipJfcT7Q=", + "path": "google.golang.org/grpc/balancer/roundrobin", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "Dkjgw1HasWvqct0IuiZdjbD7O0c=", + "path": "google.golang.org/grpc/codes", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=", + "path": "google.golang.org/grpc/connectivity", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "EEpFv96tNVcG9Z42ehroNbG/VKI=", + "path": "google.golang.org/grpc/credentials", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "H7SuPUqbPcdbNqgl+k3ohuwMAwE=", + "path": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "ntHev01vgZgeIh5VFRmbLx/BSTo=", + "path": "google.golang.org/grpc/grpclog", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "/M6Lug7Dj22dZNu4X6bZDVa5mkQ=", + "path": "google.golang.org/grpc/health", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "6vY7tYjV84pnr3sDctzx53Bs8b0=", + "path": "google.golang.org/grpc/health/grpc_health_v1", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "U9vDe05/tQrvFBojOQX8Xk12W9I=", + "path": "google.golang.org/grpc/internal", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "hcuHgKp8W0wIzoCnNfKI8NUss5o=", + "path": "google.golang.org/grpc/keepalive", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "KeUmTZV+2X46C49cKyjp+xM7fvw=", + "path": "google.golang.org/grpc/metadata", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "dgwdT20kXe4ZbXBOFbTwVQt8rmA=", + "path": "google.golang.org/grpc/naming", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=", + "path": "google.golang.org/grpc/peer", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "H7VyP18nJ9MmoB5r9+I7EKVEeVM=", + "path": "google.golang.org/grpc/resolver", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "WpWF+bDzObsHf+bjoGpb/abeFxo=", + "path": "google.golang.org/grpc/resolver/dns", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=", + "path": "google.golang.org/grpc/resolver/passthrough", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "G9lgXNi7qClo5sM2s6TbTHLFR3g=", + 
"path": "google.golang.org/grpc/stats", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "3Dwz4RLstDHMPyDA7BUsYe+JP4w=", + "path": "google.golang.org/grpc/status", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=", + "path": "google.golang.org/grpc/tap", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, + { + "checksumSHA1": "8r/f3JOiyV+A11ljE4oEQtRQXAU=", + "path": "google.golang.org/grpc/transport", + "revision": "b5eab4ccac6df282ff981436a5a87554d7377c75", + "revisionTime": "2017-10-25T22:59:19Z" + }, { "checksumSHA1": "fALlQNY1fM99NesfLJ50KguWsio=", "path": "gopkg.in/yaml.v2", diff --git a/website/aws.erb b/website/aws.erb index 7f18c1d57c8..09f08082a86 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -53,12 +53,18 @@ > aws_cloudformation_stack + > + aws_cloudtrail_service_account + > aws_db_instance > aws_db_snapshot + > + aws_dynamodb_table + > aws_ebs_snapshot @@ -143,6 +149,8 @@ > aws_kms_secret + > + aws_nat_gateway > aws_lb @@ -170,6 +178,9 @@ > aws_route_table + > + aws_s3_bucket + > aws_s3_bucket_object @@ -436,6 +447,9 @@ > aws_cognito_identity_pool + > + aws_cognito_identity_pool_roles_attachment + @@ -704,6 +718,10 @@ ECS Resources + > + Service Catalog Resources + + > Step Function Resources diff --git a/website/docs/d/billing_service_account.html.markdown b/website/docs/d/billing_service_account.html.markdown index aa6da655031..765ad1c9aa0 100644 --- a/website/docs/d/billing_service_account.html.markdown +++ b/website/docs/d/billing_service_account.html.markdown @@ -32,7 +32,7 @@ resource "aws_s3_bucket" "billing_logs" { "Resource": "arn:aws:s3:::my-billing-tf-test-bucket", "Principal": { "AWS": [ - "${data.aws_billing_service_account.main.id}" + "${data.aws_billing_service_account.main.arn}" ] } }, @@ -44,7 +44,7 @@ resource "aws_s3_bucket" "billing_logs" { "Resource": "arn:aws:s3:::my-billing-tf-test-bucket/AWSLogs/*", "Principal": { "AWS": [ - "${data.aws_billing_service_account.main.id}" + "${data.aws_billing_service_account.main.arn}" ] } } diff --git a/website/docs/d/cloudtrail_service_account.html.markdown b/website/docs/d/cloudtrail_service_account.html.markdown new file mode 100644 index 00000000000..8825de8bcd0 --- /dev/null +++ b/website/docs/d/cloudtrail_service_account.html.markdown @@ -0,0 +1,60 @@ +--- +layout: "aws" +page_title: "AWS: aws_cloudtrail_service_account" +sidebar_current: "docs-aws-datasource-cloudtrail-service-account" +description: |- + Get AWS CloudTrail Service Account ID for storing trail data in S3. +--- + +# aws_cloudtrail_service_account + +Use this data source to get the Account ID of the [AWS CloudTrail Service Account](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html) +in a given region for the purpose of allowing CloudTrail to store trail data in S3. 
+ +## Example Usage + +```hcl +data "aws_cloudtrail_service_account" "main" {} + +resource "aws_s3_bucket" "bucket" { + bucket = "tf-cloudtrail-logging-test-bucket" + force_destroy = true + + policy = < **NOTE on ELB Instances and ELB Attachments:** Terraform currently provides both a standalone [ELB Attachment resource](elb_attachment.html) diff --git a/website/docs/r/emr_cluster.html.md b/website/docs/r/emr_cluster.html.md index 24073b0537d..7ecce71c052 100644 --- a/website/docs/r/emr_cluster.html.md +++ b/website/docs/r/emr_cluster.html.md @@ -104,7 +104,20 @@ Cannot specify the `cc1.4xlarge` instance type for nodes of a job flow launched * `service_access_security_group` - (Optional) Identifier of the Amazon EC2 service-access security group - required when the cluster runs on a private subnet * `instance_profile` - (Required) Instance Profile for EC2 instances of the cluster assume this role -~> **NOTE on EMR-Managed security groups:** These security groups will have any missing inbound or outbound access rules added and maintained by AWS, to ensure proper communication between instances in a cluster. Maintenance of the security group rules by AWS may lead Terraform to fail because of a race condition between Terraform and AWS in managing the rules. To avoid this, make the `emr_cluster` dependent on all the required security group rules. See [Amazon EMR-Managed Security Groups](http://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html) for more information about the EMR-managed security group rules. +~> **NOTE on EMR-Managed security groups:** These security groups will have any +missing inbound or outbound access rules added and maintained by AWS, to ensure +proper communication between instances in a cluster. The EMR service will +maintain these rules for groups provided in `emr_managed_master_security_group` +and `emr_managed_slave_security_group`; attempts to remove the required rules +may succeed, only for the EMR service to re-add them in a matter of minutes. +This may cause Terraform to fail to destroy an environment that contains an EMR +cluster, because the EMR service does not revoke rules added on deletion, +leaving a cyclic dependency between the security groups that prevents their +deletion. To avoid this, use the `revoke_rules_on_delete` optional attribute for +any Security Group used in `emr_managed_master_security_group` and +`emr_managed_slave_security_group`. See [Amazon EMR-Managed Security +Groups](http://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html) +for more information about the EMR-managed security group rules. ## instance\_group diff --git a/website/docs/r/instance.html.markdown b/website/docs/r/instance.html.markdown index 100ce738d9a..fc1bef32acb 100644 --- a/website/docs/r/instance.html.markdown +++ b/website/docs/r/instance.html.markdown @@ -223,6 +223,7 @@ The following attributes are exported: * `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this is only available if you've enabled DNS hostnames for your VPC * `public_ip` - The public IP address assigned to the instance, if applicable. **NOTE**: If you are using an [`aws_eip`](/docs/providers/aws/r/eip.html) with your instance, you should refer to the EIP's address directly and not use `public_ip`, as this field will change after the EIP is attached. +* `ipv6_addresses` - A list of assigned IPv6 addresses, if any * `network_interface_id` - The ID of the network interface that was created with the instance. 
* `primary_network_interface_id` - The ID of the instance's primary network interface. * `private_dns` - The private DNS name assigned to the instance. Can only be diff --git a/website/docs/r/kinesis_firehose_delivery_stream.html.markdown b/website/docs/r/kinesis_firehose_delivery_stream.html.markdown index 09a52589ad0..2157a044b90 100644 --- a/website/docs/r/kinesis_firehose_delivery_stream.html.markdown +++ b/website/docs/r/kinesis_firehose_delivery_stream.html.markdown @@ -167,6 +167,14 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { data_table_name = "test-table" copy_options = "delimiter '|'" # the default delimiter data_table_columns = "test-col" + s3_backup_mode = "Enabled" + s3_backup_configuration { + role_arn = "${aws_iam_role.firehose_role.arn}" + bucket_arn = "${aws_s3_bucket.bucket.arn}" + buffer_size = 15 + buffer_interval = 300 + compression_format = "GZIP" + } } } ``` @@ -244,6 +252,8 @@ The `redshift_configuration` object supports the following: * `password` - (Required) The password for the username above. * `retry_duration` - (Optional) The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value. * `role_arn` - (Required) The arn of the role the stream assumes. +* `s3_backup_mode` - (Optional) The Amazon S3 backup mode. Valid values are `Disabled` and `Enabled`. Default value is `Disabled`. +* `s3_backup_configuration` - (Optional) The configuration for backup in Amazon S3. Required if `s3_backup_mode` is `Enabled`. Supports the same fields as `s3_configuration` object. * `data_table_name` - (Required) The name of the table in the redshift cluster that the s3 bucket will copy to. * `copy_options` - (Optional) Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the [AWS documentation](http://docs.aws.amazon.com/firehose/latest/APIReference/API_CopyCommand.html) * `data_table_columns` - (Optional) The data table columns that will be targeted by the copy command. diff --git a/website/docs/r/kms_key.html.markdown b/website/docs/r/kms_key.html.markdown index 2a7f014e162..5fd0950c803 100644 --- a/website/docs/r/kms_key.html.markdown +++ b/website/docs/r/kms_key.html.markdown @@ -25,7 +25,7 @@ The following arguments are supported: * `description` - (Optional) The description of the key as viewed in AWS console. * `key_usage` - (Optional) Specifies the intended use of the key. - Defaults to ENCRYPT/DECRYPT, and only symmetric encryption and decryption are supported. + Defaults to ENCRYPT_DECRYPT, and only symmetric encryption and decryption are supported. * `policy` - (Optional) A valid policy JSON document. * `deletion_window_in_days` - (Optional) Duration in days after which the key is deleted after destruction of the resource, must be between 7 and 30 days. Defaults to 30 days. diff --git a/website/docs/r/lb.html.markdown b/website/docs/r/lb.html.markdown index c155a15e099..7617e7d294a 100644 --- a/website/docs/r/lb.html.markdown +++ b/website/docs/r/lb.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a Load Balancer resource. -~> **Note:** `aws_alb` is know as `aws_lb`. The functionality is identical. +~> **Note:** `aws_alb` is known as `aws_lb`. The functionality is identical. 
## Example Usage diff --git a/website/docs/r/lb_listener_rule.html.markdown b/website/docs/r/lb_listener_rule.html.markdown index 72a2b3c1cb7..3473b123c54 100644 --- a/website/docs/r/lb_listener_rule.html.markdown +++ b/website/docs/r/lb_listener_rule.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a Load Balancer Listener Rule resource. -~> **Note:** `aws_alb_listener_rule` is know as `aws_lb_listener_rule`. The functionality is identical. +~> **Note:** `aws_alb_listener_rule` is known as `aws_lb_listener_rule`. The functionality is identical. ## Example Usage diff --git a/website/docs/r/lb_target_group.html.markdown b/website/docs/r/lb_target_group.html.markdown index 15590ce58b0..b085787d781 100644 --- a/website/docs/r/lb_target_group.html.markdown +++ b/website/docs/r/lb_target_group.html.markdown @@ -10,7 +10,7 @@ description: |- Provides a Target Group resource for use with Load Balancer resources. -~> **Note:** `aws_alb_target_group` is know as `aws_lb_target_group`. The functionality is identical. +~> **Note:** `aws_alb_target_group` is known as `aws_lb_target_group`. The functionality is identical. ## Example Usage @@ -39,6 +39,12 @@ The following arguments are supported: * `deregistration_delay` - (Optional) The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds. * `stickiness` - (Optional) A Stickiness block. Stickiness blocks are documented below. * `health_check` - (Optional) A Health Check block. Health Check blocks are documented below. +* `target_type` - (Optional) The type of target that you must specify when registering targets with this target group. +The possible values are `instance` (targets are specified by instance ID) or `ip` (targets are specified by IP address). +The default is `instance`. Note that you can't specify targets for a target group using both instance IDs and IP addresses. +If the target type is `ip`, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, +the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). +You can't specify publicly routable IP addresses. * `tags` - (Optional) A mapping of tags to assign to the resource. Stickiness Blocks (`stickiness`) support the following: diff --git a/website/docs/r/lb_target_group_attachment.html.markdown b/website/docs/r/lb_target_group_attachment.html.markdown index 9770ed30131..80c1d548619 100644 --- a/website/docs/r/lb_target_group_attachment.html.markdown +++ b/website/docs/r/lb_target_group_attachment.html.markdown @@ -37,8 +37,9 @@ resource "aws_instance" "test" { The following arguments are supported: * `target_group_arn` - (Required) The ARN of the target group with which to register targets -* `target_id` (Required) The ID of the target. This is the Instance ID for an instance, or the container ID for an ECS container. +* `target_id` - (Required) The ID of the target. This is the Instance ID for an instance, or the container ID for an ECS container. If the target type is `ip`, specify an IP address. * `port` - (Optional) The port on which targets receive traffic. +* `availability_zone` - (Optional) The Availability Zone where the IP address of the target is to be registered.
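For the new `ip` target type, the target group and the attachment work together. The following is a minimal sketch, not part of this changeset, assuming a VPC resource named `aws_vpc.main` already exists and using placeholder IP and Availability Zone values:

```hcl
# A target group that registers targets by IP address instead of instance ID.
resource "aws_lb_target_group" "ip_example" {
  name        = "tf-example-lb-tg-ip"
  port        = 80
  protocol    = "HTTP"
  target_type = "ip"
  vpc_id      = "${aws_vpc.main.id}"
}

# Register an address from the VPC's RFC 1918 range; availability_zone ties
# the registration to the zone that owns the address.
resource "aws_lb_target_group_attachment" "ip_example" {
  target_group_arn  = "${aws_lb_target_group.ip_example.arn}"
  target_id         = "10.0.1.15"
  port              = 80
  availability_zone = "us-west-2a"
}
```

Because IP targets are not tied to an instance, this is what allows registering targets that live outside the target group's VPC, within the documented CIDR ranges.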
## Attributes Reference diff --git a/website/docs/r/s3_bucket_object.html.markdown b/website/docs/r/s3_bucket_object.html.markdown index b786238a5f6..356dfc18732 100644 --- a/website/docs/r/s3_bucket_object.html.markdown +++ b/website/docs/r/s3_bucket_object.html.markdown @@ -86,6 +86,7 @@ This attribute is not compatible with `kms_key_id`. This value is a fully qualified **ARN** of the KMS Key. If using `aws_kms_key`, use the exported `arn` attribute: `kms_key_id = "${aws_kms_key.foo.arn}"` +* `metadata` - (Optional) A mapping of keys/values to provision metadata (automatically prefixed by `x-amz-meta-`). * `tags` - (Optional) A mapping of tags to assign to the object. Either `source` or `content` must be provided to specify the bucket content. diff --git a/website/docs/r/security_group.html.markdown b/website/docs/r/security_group.html.markdown index 4de2cccc6e4..cfe7c21fd50 100644 --- a/website/docs/r/security_group.html.markdown +++ b/website/docs/r/security_group.html.markdown @@ -25,6 +25,7 @@ Basic usage resource "aws_security_group" "allow_all" { name = "allow_all" description = "Allow all inbound traffic" + vpc_id = "${aws_vpc.main.id}" ingress { from_port = 0 @@ -79,6 +80,13 @@ assign a random, unique name ingress rule. Each ingress block supports fields documented below. * `egress` - (Optional, VPC only) Can be specified multiple times for each egress rule. Each egress block supports fields documented below. +* `revoke_rules_on_delete` - (Optional) Instruct Terraform to revoke all of the +Security Group's attached ingress and egress rules before deleting the security +group itself. This is normally not needed; however, certain AWS services such as +Elastic MapReduce may automatically add required rules to security groups used +with the service, and those rules may contain a cyclic dependency that prevents +the security groups from being destroyed without removing the dependency first. +Defaults to `false`. * `vpc_id` - (Optional, Forces new resource) The VPC ID. * `tags` - (Optional) A mapping of tags to assign to the resource. diff --git a/website/docs/r/servicecatalog_portfolio.html.markdown b/website/docs/r/servicecatalog_portfolio.html.markdown new file mode 100644 index 00000000000..5d0915d495a --- /dev/null +++ b/website/docs/r/servicecatalog_portfolio.html.markdown @@ -0,0 +1,44 @@ +--- +layout: "aws" +page_title: "AWS: aws_servicecatalog_portfolio" +sidebar_current: "docs-aws-resource-servicecatalog-portfolio" +description: |- + Provides a resource to create a Service Catalog portfolio +--- + +# aws_servicecatalog_portfolio + +Provides a resource to create a Service Catalog Portfolio. + +## Example Usage + +```hcl +resource "aws_servicecatalog_portfolio" "portfolio" { + name = "My App Portfolio" + description = "List of my organization's apps" + provider_name = "Brett" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the portfolio. +* `description` - (Required) Description of the portfolio. +* `provider_name` - (Required) Name of the person or organization who owns the portfolio. +* `tags` - (Optional) Tags to apply to the portfolio. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the Service Catalog Portfolio. + +## Import + +Service Catalog Portfolios can be imported using the `service catalog portfolio id`, e.g.
+ +``` +$ terraform import aws_servicecatalog_portfolio.testfolio port-12344321 +``` diff --git a/website/docs/r/ses_domain_dkim.html.markdown b/website/docs/r/ses_domain_dkim.html.markdown new file mode 100644 index 00000000000..a87ea309c54 --- /dev/null +++ b/website/docs/r/ses_domain_dkim.html.markdown @@ -0,0 +1,59 @@ +--- +layout: "aws" +page_title: "AWS: ses_domain_dkim" +sidebar_current: "docs-aws-resource-ses-domain-dkim" +description: |- + Provides an SES domain DKIM generation resource +--- + +# aws\_ses\_domain\_dkim + +Provides an SES domain DKIM generation resource. + +Domain ownership needs to be confirmed first using the [ses_domain_identity resource](/docs/providers/aws/r/ses_domain_identity.html). + +## Argument Reference + +The following arguments are supported: + +* `domain` - (Required) Verified domain name to generate DKIM tokens for. + +## Attributes Reference + +The following attributes are exported: + +* `dkim_tokens` - DKIM tokens generated by SES. + These tokens should be used to create CNAME records used to verify SES Easy DKIM. + See below for an example of how this might be achieved + when the domain is hosted in Route 53 and managed by Terraform. + Find out more about verifying domains in Amazon SES + in the [AWS SES docs](http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + +## Example Usage + +```hcl +resource "aws_ses_domain_identity" "example" { + domain = "example.com" +} + +resource "aws_ses_domain_dkim" "example" { + domain = "${aws_ses_domain_identity.example.domain}" +} + +resource "aws_route53_record" "example_amazonses_verification_record" { + count = 3 + zone_id = "ABCDEFGHIJ123" + name = "${element(aws_ses_domain_dkim.example.dkim_tokens, count.index)}._domainkey.example.com" + type = "CNAME" + ttl = "600" + records = ["${element(aws_ses_domain_dkim.example.dkim_tokens, count.index)}.dkim.amazonses.com"] +} +``` + +## Import + +DKIM tokens can be imported using the `domain` attribute, e.g. + +``` +$ terraform import aws_ses_domain_dkim.example example.com +``` diff --git a/website/docs/r/sns_topic_subscription.html.markdown b/website/docs/r/sns_topic_subscription.html.markdown index cba8d5015bf..8ebe8cee905 100644 --- a/website/docs/r/sns_topic_subscription.html.markdown +++ b/website/docs/r/sns_topic_subscription.html.markdown @@ -237,11 +237,11 @@ resource "aws_sns_topic_subscription" "sns-topic" { The following arguments are supported: * `topic_arn` - (Required) The ARN of the SNS topic to subscribe to -* `protocol` - (Required) The protocol to use. The possible values for this are: `sqs`, `lambda`, `application`. (`http` or `https` are partially supported, see below) (`email`, `sms`, are options but unsupported, see below). +* `protocol` - (Required) The protocol to use. The possible values for this are: `sqs`, `sms`, `lambda`, `application`. (`http` or `https` are partially supported, see below) (`email` is an option but unsupported, see below). * `endpoint` - (Required) The endpoint to send data to, the contents will vary with the protocol. (see below for more information) * `endpoint_auto_confirms` - (Optional) Boolean indicating whether the end point is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) e.g., PagerDuty (default is false) * `confirmation_timeout_in_minutes` - (Optional) Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure.
Only applicable for http and https protocols (default is 1 minute). -* `raw_message_delivery` - (Optional) Boolean indicating whether or not to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). +* `raw_message_delivery` - (Optional) Boolean indicating whether or not to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property) (default is false). ### Protocols supported @@ -250,6 +250,7 @@ Supported SNS protocols include: * `lambda` -- delivery of JSON-encoded message to a lambda function * `sqs` -- delivery of JSON-encoded message to an Amazon SQS queue * `application` -- delivery of JSON-encoded message to an EndpointArn for a mobile app and device +* `sms` -- delivery of message via SMS Partially supported SNS protocols include: @@ -260,7 +261,6 @@ Unsupported protocols include the following: * `email` -- delivery of message via SMTP * `email-json` -- delivery of JSON-encoded message via SMTP -* `sms` -- delivery text message These are unsupported because the endpoint needs to be authorized and does not generate an ARN until the target email address has been validated. This breaks diff --git a/website/docs/r/spot_fleet_request.html.markdown b/website/docs/r/spot_fleet_request.html.markdown index 5a66a3c6ac2..4aa3ac90acd 100644 --- a/website/docs/r/spot_fleet_request.html.markdown +++ b/website/docs/r/spot_fleet_request.html.markdown @@ -42,6 +42,10 @@ resource "aws_spot_fleet_request" "cheap_compute" { volume_size = "300" volume_type = "gp2" } + + tags { + Name = "spot-fleet-example" + } } } ``` diff --git a/website/docs/r/sqs_queue.html.markdown b/website/docs/r/sqs_queue.html.markdown index 5f84f86a092..e9f78b3bcc6 100644 --- a/website/docs/r/sqs_queue.html.markdown +++ b/website/docs/r/sqs_queue.html.markdown @@ -6,7 +6,7 @@ description: |- Provides a SQS resource. --- -# aws\_sqs\_queue +# aws_sqs_queue ## Example Usage @@ -18,6 +18,10 @@ resource "aws_sqs_queue" "terraform_queue" { message_retention_seconds = 86400 receive_wait_time_seconds = 10 redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.terraform_queue_deadletter.arn}\",\"maxReceiveCount\":4}" + + tags { + Environment = "production" + } } ``` @@ -58,6 +62,7 @@ The following arguments are supported: * `content_based_deduplication` - (Optional) Enables content-based deduplication for FIFO queues. For more information, see the [related documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) * `kms_master_key_id` - (Optional) The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see [Key Terms](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). * `kms_data_key_reuse_period_seconds` - (Optional) The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). +* `tags` - (Optional) A mapping of tags to assign to the queue.
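A hedged sketch, not part of this changeset, combining the new `tags` argument with the server-side encryption arguments above; `alias/aws/sqs` refers to the AWS-managed CMK for Amazon SQS, and the queue name here is arbitrary:

```hcl
# A queue encrypted with the AWS-managed SQS key, reusing data keys for
# five minutes between KMS calls, and tagged as in the example above.
resource "aws_sqs_queue" "encrypted_queue" {
  name                              = "terraform-encrypted-queue"
  kms_master_key_id                 = "alias/aws/sqs"
  kms_data_key_reuse_period_seconds = 300

  tags {
    Environment = "production"
  }
}
```

A longer reuse period reduces KMS request costs at the expense of holding a data key in memory longer.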
## Attributes Reference diff --git a/website/docs/r/ssm_parameter.html.markdown b/website/docs/r/ssm_parameter.html.markdown index 2c436fd9b5d..de72f6459c7 100644 --- a/website/docs/r/ssm_parameter.html.markdown +++ b/website/docs/r/ssm_parameter.html.markdown @@ -62,7 +62,7 @@ The following arguments are supported: * `value` - (Required) The value of the parameter. * `description` - (Optional) The description of the parameter. * `key_id` - (Optional) The KMS key id or arn for encrypting a SecureString. -* `overwrite` - (Optional) Overwrite an existing parameter. If not specified, will default to `false` if the resource has not been created by terraform to avoid overwrite of existing resource and will default to `true` otherwise (terraform lifecycle rules should then be used to manage the update behavior). +* `overwrite` - (Optional) Defaults to `false`. Forces an overwrite of an existing parameter that is not yet managed by Terraform. For resources already in the state file, Terraform lifecycle rules govern the update behavior. * `allowed_pattern` - (Optional) A regular expression used to validate the parameter value. * `tags` - (Optional) A mapping of tags to assign to the object. diff --git a/website/docs/r/vpc_dhcp_options.html.markdown b/website/docs/r/vpc_dhcp_options.html.markdown index 34bf01cc8ea..fca53c87d31 100644 --- a/website/docs/r/vpc_dhcp_options.html.markdown +++ b/website/docs/r/vpc_dhcp_options.html.markdown @@ -41,7 +41,7 @@ resource "aws_vpc_dhcp_options" "foo" { The following arguments are supported: * `domain_name` - (Optional) the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file. -* `domain_name_servers` - (Optional) List of name servers to configure in `/etc/resolv.conf`. +* `domain_name_servers` - (Optional) List of name servers to configure in `/etc/resolv.conf`. If you want to use the default AWS nameservers, you should set this to `AmazonProvidedDNS`. * `ntp_servers` - (Optional) List of NTP servers to configure. * `netbios_name_servers` - (Optional) List of NETBIOS name servers. * `netbios_node_type` - (Optional) The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt). @@ -52,6 +52,7 @@ The following arguments are supported: * `domain_name_servers`, `netbios_name_servers`, `ntp_servers` are limited by AWS to maximum four servers only. * To actually use the DHCP Options Set you need to associate it to a VPC using [`aws_vpc_dhcp_options_association`](/docs/providers/aws/r/vpc_dhcp_options_association.html). * If you delete a DHCP Options Set, all VPCs using it will be associated to AWS's `default` DHCP Option Set. +* In most cases, unless you're configuring your own DNS, you'll want to set `domain_name_servers` to `AmazonProvidedDNS`. ## Attributes Reference